loom-agent 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of loom-agent might be problematic.
- loom/__init__.py +77 -0
- loom/agent.py +217 -0
- loom/agents/__init__.py +10 -0
- loom/agents/refs.py +28 -0
- loom/agents/registry.py +50 -0
- loom/builtin/compression/__init__.py +4 -0
- loom/builtin/compression/structured.py +79 -0
- loom/builtin/embeddings/__init__.py +9 -0
- loom/builtin/embeddings/openai_embedding.py +135 -0
- loom/builtin/embeddings/sentence_transformers_embedding.py +145 -0
- loom/builtin/llms/__init__.py +8 -0
- loom/builtin/llms/mock.py +34 -0
- loom/builtin/llms/openai.py +168 -0
- loom/builtin/llms/rule.py +102 -0
- loom/builtin/memory/__init__.py +5 -0
- loom/builtin/memory/in_memory.py +21 -0
- loom/builtin/memory/persistent_memory.py +278 -0
- loom/builtin/retriever/__init__.py +9 -0
- loom/builtin/retriever/chroma_store.py +265 -0
- loom/builtin/retriever/in_memory.py +106 -0
- loom/builtin/retriever/milvus_store.py +307 -0
- loom/builtin/retriever/pinecone_store.py +237 -0
- loom/builtin/retriever/qdrant_store.py +274 -0
- loom/builtin/retriever/vector_store.py +128 -0
- loom/builtin/retriever/vector_store_config.py +217 -0
- loom/builtin/tools/__init__.py +32 -0
- loom/builtin/tools/calculator.py +49 -0
- loom/builtin/tools/document_search.py +111 -0
- loom/builtin/tools/glob.py +27 -0
- loom/builtin/tools/grep.py +56 -0
- loom/builtin/tools/http_request.py +86 -0
- loom/builtin/tools/python_repl.py +73 -0
- loom/builtin/tools/read_file.py +32 -0
- loom/builtin/tools/task.py +158 -0
- loom/builtin/tools/web_search.py +64 -0
- loom/builtin/tools/write_file.py +31 -0
- loom/callbacks/base.py +9 -0
- loom/callbacks/logging.py +12 -0
- loom/callbacks/metrics.py +27 -0
- loom/callbacks/observability.py +248 -0
- loom/components/agent.py +107 -0
- loom/core/agent_executor.py +450 -0
- loom/core/circuit_breaker.py +178 -0
- loom/core/compression_manager.py +329 -0
- loom/core/context_retriever.py +185 -0
- loom/core/error_classifier.py +193 -0
- loom/core/errors.py +66 -0
- loom/core/message_queue.py +167 -0
- loom/core/permission_store.py +62 -0
- loom/core/permissions.py +69 -0
- loom/core/scheduler.py +125 -0
- loom/core/steering_control.py +47 -0
- loom/core/structured_logger.py +279 -0
- loom/core/subagent_pool.py +232 -0
- loom/core/system_prompt.py +141 -0
- loom/core/system_reminders.py +283 -0
- loom/core/tool_pipeline.py +113 -0
- loom/core/types.py +269 -0
- loom/interfaces/compressor.py +59 -0
- loom/interfaces/embedding.py +51 -0
- loom/interfaces/llm.py +33 -0
- loom/interfaces/memory.py +29 -0
- loom/interfaces/retriever.py +179 -0
- loom/interfaces/tool.py +27 -0
- loom/interfaces/vector_store.py +80 -0
- loom/llm/__init__.py +14 -0
- loom/llm/config.py +228 -0
- loom/llm/factory.py +111 -0
- loom/llm/model_health.py +235 -0
- loom/llm/model_pool_advanced.py +305 -0
- loom/llm/pool.py +170 -0
- loom/llm/registry.py +201 -0
- loom/mcp/__init__.py +4 -0
- loom/mcp/client.py +86 -0
- loom/mcp/registry.py +58 -0
- loom/mcp/tool_adapter.py +48 -0
- loom/observability/__init__.py +5 -0
- loom/patterns/__init__.py +5 -0
- loom/patterns/multi_agent.py +123 -0
- loom/patterns/rag.py +262 -0
- loom/plugins/registry.py +55 -0
- loom/resilience/__init__.py +5 -0
- loom/tooling.py +72 -0
- loom/utils/agent_loader.py +218 -0
- loom/utils/token_counter.py +19 -0
- loom_agent-0.0.1.dist-info/METADATA +457 -0
- loom_agent-0.0.1.dist-info/RECORD +89 -0
- loom_agent-0.0.1.dist-info/WHEEL +4 -0
- loom_agent-0.0.1.dist-info/licenses/LICENSE +21 -0
loom/llm/pool.py
ADDED
@@ -0,0 +1,170 @@

"""Model pool management system.

A framework-level multi-model management mechanism that lets users:
1. Configure multiple LLM models
2. Assign each model an alias and a set of capabilities
3. Let an Agent automatically select a suitable model for each task
"""

from __future__ import annotations

from dataclasses import dataclass, field
from typing import Any, Callable, Dict, Optional

from loom.interfaces.llm import BaseLLM
from .config import LLMCapabilities


@dataclass
class ModelEntry:
    """A named model together with its capabilities and free-form metadata."""
    name: str
    llm: BaseLLM
    capabilities: LLMCapabilities
    metadata: Dict[str, Any] = field(default_factory=dict)


class ModelPool:
    """Registry of LLMs addressable by name or by required capabilities."""

    def __init__(self, default_model: Optional[str] = None):
        self._models: Dict[str, ModelEntry] = {}
        self._default_model = default_model

    def add(
        self,
        name: str,
        llm: BaseLLM,
        capabilities: Optional[LLMCapabilities] = None,
        **metadata,
    ) -> None:
        if capabilities is None:
            if hasattr(llm, "capabilities"):
                capabilities = llm.capabilities  # type: ignore[attr-defined]
            else:
                capabilities = LLMCapabilities()

        entry = ModelEntry(
            name=name,
            llm=llm,
            capabilities=capabilities,
            metadata=metadata,
        )

        self._models[name] = entry
        # The first model added becomes the default unless one was set explicitly.
        if len(self._models) == 1 and self._default_model is None:
            self._default_model = name

    def get(self, name: str) -> Optional[BaseLLM]:
        entry = self._models.get(name)
        return entry.llm if entry else None

    def get_default(self) -> Optional[BaseLLM]:
        if self._default_model:
            return self.get(self._default_model)
        return None

    def set_default(self, name: str) -> None:
        if name not in self._models:
            raise ValueError(f"Model '{name}' not found in pool")
        self._default_model = name

    def select_by_capabilities(
        self,
        required_capabilities: LLMCapabilities,
        prefer_default: bool = False,
    ) -> Optional[BaseLLM]:
        if prefer_default and self._default_model:
            default_entry = self._models[self._default_model]
            if self._capabilities_match(default_entry.capabilities, required_capabilities):
                return default_entry.llm

        # Otherwise return the first model whose capabilities satisfy the request.
        for entry in self._models.values():
            if self._capabilities_match(entry.capabilities, required_capabilities):
                return entry.llm
        return None

    def select(
        self,
        selector: Optional[Callable[[Dict[str, ModelEntry]], Optional[str]]] = None,
        **requirements,
    ) -> Optional[BaseLLM]:
        if selector:
            selected_name = selector(self._models)
            return self.get(selected_name) if selected_name else None

        if requirements:
            required_caps = LLMCapabilities(**requirements)
            return self.select_by_capabilities(required_caps)
        return self.get_default()

    def list_models(self) -> Dict[str, ModelEntry]:
        return dict(self._models)

    def remove(self, name: str) -> None:
        if name in self._models:
            del self._models[name]
            if self._default_model == name:
                self._default_model = next(iter(self._models), None)

    def __len__(self) -> int:
        return len(self._models)

    def __contains__(self, name: str) -> bool:
        return name in self._models

    @staticmethod
    def _capabilities_match(
        model_caps: LLMCapabilities,
        required_caps: LLMCapabilities,
    ) -> bool:
        if required_caps.supports_tools and not model_caps.supports_tools:
            return False
        if required_caps.supports_vision and not model_caps.supports_vision:
            return False
        if required_caps.supports_json_mode and not model_caps.supports_json_mode:
            return False
        if required_caps.max_tokens > model_caps.max_tokens:
            return False
        if required_caps.context_window > model_caps.context_window:
            return False
        return True


class CapabilityBasedSelector:
    """Selects a model from a pool based on capabilities inferred from task context."""

    def __init__(
        self,
        prefer_default: bool = True,
        fallback_to_default: bool = True,
    ):
        self.prefer_default = prefer_default
        self.fallback_to_default = fallback_to_default

    def select(
        self,
        pool: ModelPool,
        task_context: Optional[Dict[str, Any]] = None,
    ) -> Optional[BaseLLM]:
        if not task_context:
            return pool.get_default()

        required_caps = self._infer_capabilities(task_context)
        selected = pool.select_by_capabilities(
            required_caps,
            prefer_default=self.prefer_default,
        )
        if selected is None and self.fallback_to_default:
            selected = pool.get_default()
        return selected

    def _infer_capabilities(
        self,
        task_context: Dict[str, Any],
    ) -> LLMCapabilities:
        return LLMCapabilities(
            supports_vision=task_context.get("has_image", False),
            supports_tools=task_context.get("needs_tools", False),
            context_window=task_context.get("context_size", 8192),
            max_tokens=task_context.get("max_output", 4096),
        )
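Taken together, a pool can be driven by explicit capability requirements or by a selector object. A minimal usage sketch (illustrative, not from the package; it assumes MockLLM from loom/builtin/llms/mock.py takes no constructor arguments and that all LLMCapabilities fields have defaults, neither of which this diff shows):

import asyncio
from loom.llm.pool import ModelPool, CapabilityBasedSelector
from loom.llm.config import LLMCapabilities
from loom.builtin.llms.mock import MockLLM  # assumed zero-arg constructor

pool = ModelPool()
pool.add("small", MockLLM(), LLMCapabilities(context_window=8192))
pool.add("vision", MockLLM(), LLMCapabilities(supports_vision=True,
                                              supports_tools=True,
                                              context_window=128000))

assert pool.get_default() is pool.get("small")     # first model becomes default
llm = pool.select(supports_vision=True)            # keyword-requirements path
selector = CapabilityBasedSelector()
llm2 = selector.select(pool, {"has_image": True})  # capabilities inferred from context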
loom/llm/registry.py
ADDED
@@ -0,0 +1,201 @@

"""Model capability registry.

Maintains capability information for each LLM provider and model,
used for capability detection and lookup.
"""

from __future__ import annotations

from typing import Dict

from .config import LLMCapabilities, LLMProvider


class ModelRegistry:
    OPENAI_MODELS: Dict[str, LLMCapabilities] = {
        "gpt-4": LLMCapabilities(
            supports_tools=True,
            supports_vision=False,
            supports_streaming=True,
            supports_json_mode=True,
            supports_system_message=True,
            max_tokens=8192,
            context_window=8192,
        ),
        "gpt-4-turbo": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=True,
            supports_system_message=True,
            max_tokens=4096,
            context_window=128000,
        ),
        "gpt-4o": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=True,
            supports_system_message=True,
            max_tokens=16384,
            context_window=128000,
        ),
        "gpt-4o-mini": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=True,
            supports_system_message=True,
            max_tokens=16384,
            context_window=128000,
        ),
        "gpt-3.5-turbo": LLMCapabilities(
            supports_tools=True,
            supports_vision=False,
            supports_streaming=True,
            supports_json_mode=True,
            supports_system_message=True,
            max_tokens=4096,
            context_window=16385,
        ),
    }

    ANTHROPIC_MODELS: Dict[str, LLMCapabilities] = {
        "claude-3-5-sonnet-20241022": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=8192,
            context_window=200000,
        ),
        "claude-3-sonnet-20240229": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=4096,
            context_window=200000,
        ),
    }

    GOOGLE_MODELS: Dict[str, LLMCapabilities] = {
        "gemini-pro": LLMCapabilities(
            supports_tools=True,
            supports_vision=False,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=8192,
            context_window=32760,
        ),
        "gemini-1.5-pro": LLMCapabilities(
            supports_tools=True,
            supports_vision=True,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=8192,
            context_window=1000000,
        ),
    }

    COHERE_MODELS: Dict[str, LLMCapabilities] = {
        "command-r-plus": LLMCapabilities(
            supports_tools=True,
            supports_vision=False,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=4096,
            context_window=128000,
        ),
        "command-r": LLMCapabilities(
            supports_tools=True,
            supports_vision=False,
            supports_streaming=True,
            supports_json_mode=False,
            supports_system_message=True,
            max_tokens=4096,
            context_window=128000,
        ),
    }

    DEFAULT_CAPABILITIES = LLMCapabilities(
        supports_tools=False,
        supports_vision=False,
        supports_streaming=True,
        supports_json_mode=False,
        supports_system_message=True,
        max_tokens=4096,
        context_window=8192,
    )

    @classmethod
    def get_capabilities(cls, provider: str, model_name: str) -> LLMCapabilities:
        provider = provider.lower()

        if provider == LLMProvider.OPENAI or provider == "azure_openai":
            return cls._get_with_fuzzy_match(cls.OPENAI_MODELS, model_name)
        elif provider == LLMProvider.ANTHROPIC:
            return cls._get_with_fuzzy_match(cls.ANTHROPIC_MODELS, model_name)
        elif provider == LLMProvider.GOOGLE:
            return cls._get_with_fuzzy_match(cls.GOOGLE_MODELS, model_name)
        elif provider == LLMProvider.COHERE:
            return cls._get_with_fuzzy_match(cls.COHERE_MODELS, model_name)
        elif provider == LLMProvider.OLLAMA:
            # Local models vary widely; assume a conservative baseline.
            return LLMCapabilities(
                supports_tools=False,
                supports_vision=False,
                supports_streaming=True,
                supports_json_mode=False,
                supports_system_message=True,
                max_tokens=2048,
                context_window=8192,
            )
        else:
            return cls.DEFAULT_CAPABILITIES

    @classmethod
    def supports_tools(cls, provider: str, model_name: str) -> bool:
        return cls.get_capabilities(provider, model_name).supports_tools

    @classmethod
    def supports_vision(cls, provider: str, model_name: str) -> bool:
        return cls.get_capabilities(provider, model_name).supports_vision

    @classmethod
    def get_context_window(cls, provider: str, model_name: str) -> int:
        return cls.get_capabilities(provider, model_name).context_window

    @classmethod
    def register_model(
        cls,
        provider: str,
        model_name: str,
        capabilities: LLMCapabilities,
    ):
        provider = provider.lower()
        if provider == LLMProvider.OPENAI:
            cls.OPENAI_MODELS[model_name] = capabilities
        elif provider == LLMProvider.ANTHROPIC:
            cls.ANTHROPIC_MODELS[model_name] = capabilities
        elif provider == LLMProvider.GOOGLE:
            cls.GOOGLE_MODELS[model_name] = capabilities
        elif provider == LLMProvider.COHERE:
            cls.COHERE_MODELS[model_name] = capabilities

    @classmethod
    def _get_with_fuzzy_match(
        cls,
        models: Dict[str, LLMCapabilities],
        model_name: str,
    ) -> LLMCapabilities:
        if model_name in models:
            return models[model_name]
        # Fall back to substring matching in either direction, in insertion order.
        model_name_lower = model_name.lower()
        for key, value in models.items():
            if model_name_lower in key.lower() or key.lower() in model_name_lower:
                return value
        return cls.DEFAULT_CAPABILITIES
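The fuzzy-match fallback has a subtlety worth noting: it scans the model dict in insertion order and matches substrings in either direction, so a shorter key can shadow a longer one. An illustrative query session (not from the package):

from loom.llm.registry import ModelRegistry

# Exact hit.
print(ModelRegistry.get_context_window("anthropic", "claude-3-5-sonnet-20241022"))  # 200000

# Fuzzy hit: "gpt-4" is tried before "gpt-4o" and is a substring of the query,
# so a dated gpt-4o snapshot name resolves to the gpt-4 entry (8192 window).
print(ModelRegistry.get_context_window("openai", "gpt-4o-2024-05-13"))  # 8192

# Unknown providers fall back to DEFAULT_CAPABILITIES.
print(ModelRegistry.supports_tools("mystery", "whatever"))  # False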
loom/mcp/__init__.py
ADDED
loom/mcp/client.py
ADDED
@@ -0,0 +1,86 @@

from __future__ import annotations

import asyncio
import json
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class MCPServerConfig:
    command: str
    args: List[str]
    env: Optional[Dict[str, str]] = None


class MCPError(Exception):
    pass


class MCPClient:
    """Minimal MCP client: JSON-RPC over stdio."""

    def __init__(self, server_config: MCPServerConfig) -> None:
        self.config = server_config
        self.process: Optional[asyncio.subprocess.Process] = None
        self.request_id = 0
        self._response_futures: Dict[int, asyncio.Future] = {}
        self._reader_task: Optional[asyncio.Task] = None

    async def connect(self) -> None:
        self.process = await asyncio.create_subprocess_exec(
            self.config.command,
            *self.config.args,
            stdin=asyncio.subprocess.PIPE,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            env=self.config.env,
        )
        # Keep a reference to the reader task so it is not garbage-collected.
        self._reader_task = asyncio.create_task(self._read_responses())
        await self._send_request(
            "initialize",
            {
                "protocolVersion": "2024-11-05",
                "capabilities": {},
                "clientInfo": {"name": "loom-framework", "version": "0.1.0"},
            },
        )

    async def disconnect(self) -> None:
        if self.process:
            self.process.terminate()
            await self.process.wait()

    async def list_tools(self) -> List[Dict]:
        result = await self._send_request("tools/list", {})
        return result.get("tools", [])

    async def call_tool(self, name: str, arguments: Dict) -> Dict:
        return await self._send_request("tools/call", {"name": name, "arguments": arguments})

    async def _send_request(self, method: str, params: Dict) -> Dict:
        assert self.process and self.process.stdin and self.process.stdout
        self.request_id += 1
        req = {"jsonrpc": "2.0", "id": self.request_id, "method": method, "params": params}
        # Register a future that the reader task resolves when the matching id arrives.
        fut: asyncio.Future = asyncio.get_running_loop().create_future()
        self._response_futures[self.request_id] = fut
        self.process.stdin.write((json.dumps(req) + "\n").encode())
        await self.process.stdin.drain()
        return await fut

    async def _read_responses(self) -> None:
        assert self.process and self.process.stdout
        while True:
            line = await self.process.stdout.readline()
            if not line:
                break
            resp = json.loads(line.decode())
            if "id" in resp:
                rid = resp["id"]
                fut = self._response_futures.pop(rid, None)
                if fut is None:
                    continue
                if "error" in resp:
                    fut.set_exception(MCPError(str(resp["error"])))
                else:
                    fut.set_result(resp.get("result", {}))
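A minimal end-to-end sketch of the client life cycle (illustrative, not from the package). The server command and the "ping" tool are hypothetical placeholders; any MCP server speaking JSON-RPC over stdio would do:

import asyncio
from loom.mcp.client import MCPClient, MCPServerConfig

async def main() -> None:
    # Hypothetical stdio MCP server; substitute a real command and args.
    client = MCPClient(MCPServerConfig(command="my-mcp-server", args=[]))
    await client.connect()                       # spawns the process, sends "initialize"
    for spec in await client.list_tools():
        print(spec["name"], "-", spec.get("description", ""))
    result = await client.call_tool("ping", {})  # assumes the server exposes "ping"
    print(result)
    await client.disconnect()

asyncio.run(main())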
loom/mcp/registry.py
ADDED
@@ -0,0 +1,58 @@

from __future__ import annotations

import json
from pathlib import Path
from typing import Dict, List, Optional

from .client import MCPClient, MCPServerConfig
from .tool_adapter import MCPTool


class MCPToolRegistry:
    """MCP tool registry: discovers and connects to servers, then loads their tools."""

    def __init__(self, config_path: Optional[str] = None) -> None:
        self.config_path = Path(config_path) if config_path else Path.home() / ".loom" / "mcp.json"
        self.servers: Dict[str, MCPClient] = {}
        self.tools: Dict[str, MCPTool] = {}

    async def discover_local_servers(self) -> None:
        if not self.config_path.exists():
            return
        config = json.loads(self.config_path.read_text())
        for name, scfg in config.get("mcpServers", {}).items():
            await self.add_server(name, scfg)

    async def add_server(self, name: str, cfg: Dict) -> None:
        client = MCPClient(
            MCPServerConfig(command=cfg["command"], args=cfg.get("args", []), env=cfg.get("env"))
        )
        await client.connect()
        self.servers[name] = client
        await self._load_server_tools(name, client)

    async def _load_server_tools(self, server_name: str, client: MCPClient) -> None:
        for spec in await client.list_tools():
            tool = MCPTool(mcp_tool_spec=spec, mcp_client=client)
            # Tools are namespaced as "<server>:<tool>" to avoid collisions.
            self.tools[f"{server_name}:{tool.name}"] = tool

    async def load_from_server(self, server_name: str) -> List[MCPTool]:
        return [tool for key, tool in self.tools.items() if key.startswith(f"{server_name}:")]

    async def load_servers(self, server_names: List[str]) -> List[MCPTool]:
        tools: List[MCPTool] = []
        for s in server_names:
            tools.extend(await self.load_from_server(s))
        return tools

    async def list_all_tools(self) -> Dict[str, List[str]]:
        grouped: Dict[str, List[str]] = {}
        for key in self.tools.keys():
            server, tname = key.split(":", 1)
            grouped.setdefault(server, []).append(tname)
        return grouped

    async def close(self) -> None:
        for client in self.servers.values():
            await client.disconnect()
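The registry reads a config file with the "mcpServers" shape used by discover_local_servers. A sketch of that file and the discovery flow (server name and command are hypothetical):

import asyncio
from loom.mcp.registry import MCPToolRegistry

# Hypothetical ~/.loom/mcp.json contents:
# {
#   "mcpServers": {
#     "local": {"command": "my-mcp-server", "args": []}
#   }
# }

async def main() -> None:
    registry = MCPToolRegistry()              # defaults to ~/.loom/mcp.json
    await registry.discover_local_servers()
    print(await registry.list_all_tools())    # e.g. {"local": ["ping", ...]}
    tools = await registry.load_servers(["local"])
    await registry.close()

asyncio.run(main())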
loom/mcp/tool_adapter.py
ADDED
@@ -0,0 +1,48 @@

from __future__ import annotations

from typing import Any, Dict

from pydantic import BaseModel, Field, create_model

from loom.interfaces.tool import BaseTool
from .client import MCPClient


class MCPTool(BaseTool):
    """Adapts an MCP tool so it can be used as a Loom tool."""

    def __init__(self, mcp_tool_spec: Dict, mcp_client: MCPClient) -> None:
        self.mcp_spec = mcp_tool_spec
        self.mcp_client = mcp_client
        self.name = mcp_tool_spec["name"]
        self.description = mcp_tool_spec.get("description", "")
        self.args_schema = self._build_pydantic_schema(mcp_tool_spec.get("inputSchema", {}))

    def _build_pydantic_schema(self, json_schema: Dict) -> type[BaseModel]:
        properties = json_schema.get("properties", {})
        required = set(json_schema.get("required", []))
        fields: Dict[str, tuple[type, Any]] = {}
        for fname, spec in properties.items():
            py_type = self._json_type_to_python(spec.get("type", "string"))
            desc = spec.get("description", "")
            # Required fields use Ellipsis so pydantic treats them as mandatory.
            default = ... if fname in required else spec.get("default", None)
            fields[fname] = (py_type, Field(default, description=desc))
        return create_model(f"{self.name.title()}Args", **fields)  # type: ignore[arg-type]

    def _json_type_to_python(self, json_type: str) -> type:
        return {
            "string": str,
            "integer": int,
            "number": float,
            "boolean": bool,
            "array": list,
            "object": dict,
        }.get(json_type, str)

    async def run(self, **kwargs) -> Any:
        result = await self.mcp_client.call_tool(self.name, kwargs)
        # MCP tool results carry a content list; return the first text block if present.
        content = result.get("content", [])
        if isinstance(content, list) and content:
            return content[0].get("text", "")
        return result
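To see how an MCP inputSchema becomes a pydantic args model, a sketch (illustrative only; it passes None for the client, which assumes BaseTool is a plain base class that does not validate constructor arguments — consistent with the attribute assignment above, but not confirmed by this diff):

from loom.mcp.tool_adapter import MCPTool

spec = {
    "name": "echo",
    "description": "Echo a message back",
    "inputSchema": {
        "type": "object",
        "properties": {"message": {"type": "string", "description": "Text to echo"}},
        "required": ["message"],
    },
}
tool = MCPTool(mcp_tool_spec=spec, mcp_client=None)  # type: ignore[arg-type]
print(tool.args_schema.model_json_schema())  # pydantic v2; use .schema() on v1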