loom_agent-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (89)
  1. loom/__init__.py +77 -0
  2. loom/agent.py +217 -0
  3. loom/agents/__init__.py +10 -0
  4. loom/agents/refs.py +28 -0
  5. loom/agents/registry.py +50 -0
  6. loom/builtin/compression/__init__.py +4 -0
  7. loom/builtin/compression/structured.py +79 -0
  8. loom/builtin/embeddings/__init__.py +9 -0
  9. loom/builtin/embeddings/openai_embedding.py +135 -0
  10. loom/builtin/embeddings/sentence_transformers_embedding.py +145 -0
  11. loom/builtin/llms/__init__.py +8 -0
  12. loom/builtin/llms/mock.py +34 -0
  13. loom/builtin/llms/openai.py +168 -0
  14. loom/builtin/llms/rule.py +102 -0
  15. loom/builtin/memory/__init__.py +5 -0
  16. loom/builtin/memory/in_memory.py +21 -0
  17. loom/builtin/memory/persistent_memory.py +278 -0
  18. loom/builtin/retriever/__init__.py +9 -0
  19. loom/builtin/retriever/chroma_store.py +265 -0
  20. loom/builtin/retriever/in_memory.py +106 -0
  21. loom/builtin/retriever/milvus_store.py +307 -0
  22. loom/builtin/retriever/pinecone_store.py +237 -0
  23. loom/builtin/retriever/qdrant_store.py +274 -0
  24. loom/builtin/retriever/vector_store.py +128 -0
  25. loom/builtin/retriever/vector_store_config.py +217 -0
  26. loom/builtin/tools/__init__.py +32 -0
  27. loom/builtin/tools/calculator.py +49 -0
  28. loom/builtin/tools/document_search.py +111 -0
  29. loom/builtin/tools/glob.py +27 -0
  30. loom/builtin/tools/grep.py +56 -0
  31. loom/builtin/tools/http_request.py +86 -0
  32. loom/builtin/tools/python_repl.py +73 -0
  33. loom/builtin/tools/read_file.py +32 -0
  34. loom/builtin/tools/task.py +158 -0
  35. loom/builtin/tools/web_search.py +64 -0
  36. loom/builtin/tools/write_file.py +31 -0
  37. loom/callbacks/base.py +9 -0
  38. loom/callbacks/logging.py +12 -0
  39. loom/callbacks/metrics.py +27 -0
  40. loom/callbacks/observability.py +248 -0
  41. loom/components/agent.py +107 -0
  42. loom/core/agent_executor.py +450 -0
  43. loom/core/circuit_breaker.py +178 -0
  44. loom/core/compression_manager.py +329 -0
  45. loom/core/context_retriever.py +185 -0
  46. loom/core/error_classifier.py +193 -0
  47. loom/core/errors.py +66 -0
  48. loom/core/message_queue.py +167 -0
  49. loom/core/permission_store.py +62 -0
  50. loom/core/permissions.py +69 -0
  51. loom/core/scheduler.py +125 -0
  52. loom/core/steering_control.py +47 -0
  53. loom/core/structured_logger.py +279 -0
  54. loom/core/subagent_pool.py +232 -0
  55. loom/core/system_prompt.py +141 -0
  56. loom/core/system_reminders.py +283 -0
  57. loom/core/tool_pipeline.py +113 -0
  58. loom/core/types.py +269 -0
  59. loom/interfaces/compressor.py +59 -0
  60. loom/interfaces/embedding.py +51 -0
  61. loom/interfaces/llm.py +33 -0
  62. loom/interfaces/memory.py +29 -0
  63. loom/interfaces/retriever.py +179 -0
  64. loom/interfaces/tool.py +27 -0
  65. loom/interfaces/vector_store.py +80 -0
  66. loom/llm/__init__.py +14 -0
  67. loom/llm/config.py +228 -0
  68. loom/llm/factory.py +111 -0
  69. loom/llm/model_health.py +235 -0
  70. loom/llm/model_pool_advanced.py +305 -0
  71. loom/llm/pool.py +170 -0
  72. loom/llm/registry.py +201 -0
  73. loom/mcp/__init__.py +4 -0
  74. loom/mcp/client.py +86 -0
  75. loom/mcp/registry.py +58 -0
  76. loom/mcp/tool_adapter.py +48 -0
  77. loom/observability/__init__.py +5 -0
  78. loom/patterns/__init__.py +5 -0
  79. loom/patterns/multi_agent.py +123 -0
  80. loom/patterns/rag.py +262 -0
  81. loom/plugins/registry.py +55 -0
  82. loom/resilience/__init__.py +5 -0
  83. loom/tooling.py +72 -0
  84. loom/utils/agent_loader.py +218 -0
  85. loom/utils/token_counter.py +19 -0
  86. loom_agent-0.0.1.dist-info/METADATA +457 -0
  87. loom_agent-0.0.1.dist-info/RECORD +89 -0
  88. loom_agent-0.0.1.dist-info/WHEEL +4 -0
  89. loom_agent-0.0.1.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,145 @@
+ """Sentence Transformers embedding adapter (local models)."""
+
+ from __future__ import annotations
+
+ from typing import List
+
+ from loom.interfaces.embedding import BaseEmbedding
+
+ try:
+     from sentence_transformers import SentenceTransformer
+     SENTENCE_TRANSFORMERS_AVAILABLE = True
+ except ImportError:
+     SENTENCE_TRANSFORMERS_AVAILABLE = False
+
+
+ class SentenceTransformersEmbedding(BaseEmbedding):
+     """
+     Local embedding adapter built on Sentence Transformers.
+
+     Features:
+     - ✅ Runs fully locally, no API required
+     - ✅ Supports many pretrained models
+     - ✅ Multilingual support
+     - ✅ GPU acceleration
+
+     Recommended models:
+     - all-MiniLM-L6-v2 (384 dims, fast)
+     - all-mpnet-base-v2 (768 dims, balanced)
+     - paraphrase-multilingual-MiniLM-L12-v2 (384 dims, multilingual)
+
+     Example:
+         from loom.builtin.embeddings import SentenceTransformersEmbedding
+
+         # English model
+         embedding = SentenceTransformersEmbedding(
+             model_name="all-MiniLM-L6-v2"
+         )
+
+         # Multilingual model
+         embedding = SentenceTransformersEmbedding(
+             model_name="paraphrase-multilingual-MiniLM-L12-v2"
+         )
+
+         # Single text
+         vector = await embedding.embed_query("Hello world")
+
+         # Batch of texts
+         vectors = await embedding.embed_documents([
+             "Hello world",
+             "Loom framework"
+         ])
+     """
+
+     def __init__(
+         self,
+         model_name: str = "all-MiniLM-L6-v2",
+         device: str = "cpu",
+         batch_size: int = 32,
+         normalize_embeddings: bool = True,
+     ):
+         """
+         Parameters:
+             model_name: model name (HuggingFace model ID)
+             device: device ('cpu', 'cuda', 'mps')
+             batch_size: batch size for encoding
+             normalize_embeddings: whether to normalize the vectors
+         """
+         if not SENTENCE_TRANSFORMERS_AVAILABLE:
+             raise ImportError(
+                 "Sentence Transformers is not installed. "
+                 "Install with: pip install sentence-transformers"
+             )
+
+         self.model_name = model_name
+         self.device = device
+         self.batch_size = batch_size
+         self.normalize_embeddings = normalize_embeddings
+
+         # Load the model
+         self.model = SentenceTransformer(model_name, device=device)
+
+     async def embed_query(self, text: str) -> List[float]:
+         """
+         Embed a single query text.
+
+         Parameters:
+             text: query text
+
+         Returns:
+             The embedding as a list of floats.
+         """
+         # SentenceTransformer.encode is synchronous; call it directly
+         # inside the async function (small models are usually fast).
+         embedding = self.model.encode(
+             text,
+             normalize_embeddings=self.normalize_embeddings,
+             convert_to_numpy=True
+         )
+         return embedding.tolist()
+
+     async def embed_documents(self, texts: List[str]) -> List[List[float]]:
+         """
+         Embed a batch of documents.
+
+         Parameters:
+             texts: list of texts
+
+         Returns:
+             A list of embeddings.
+         """
+         # Filter out empty texts
+         non_empty_texts = [t for t in texts if t.strip()]
+
+         if not non_empty_texts:
+             dimension = self.get_dimension()
+             # Build each zero vector separately so callers never share one list
+             return [[0.0] * dimension for _ in texts]
+
+         # Batch encode
+         embeddings = self.model.encode(
+             non_empty_texts,
+             batch_size=self.batch_size,
+             normalize_embeddings=self.normalize_embeddings,
+             convert_to_numpy=True,
+             show_progress_bar=False
+         )
+
+         vectors = embeddings.tolist()
+
+         # Re-insert zero vectors at the positions of empty texts
+         result = []
+         non_empty_idx = 0
+         dimension = len(vectors[0])
+
+         for text in texts:
+             if text.strip():
+                 result.append(vectors[non_empty_idx])
+                 non_empty_idx += 1
+             else:
+                 result.append([0.0] * dimension)
+
+         return result
+
+     def get_dimension(self) -> int:
+         """Return the embedding dimension."""
+         return self.model.get_sentence_embedding_dimension()
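
A minimal usage sketch for the adapter above, assuming only the constructor and embed_query shown in this file:

    import asyncio

    from loom.builtin.embeddings import SentenceTransformersEmbedding

    async def main() -> None:
        embedding = SentenceTransformersEmbedding(model_name="all-MiniLM-L6-v2")
        vector = await embedding.embed_query("Hello world")
        print(len(vector))  # 384 dimensions for all-MiniLM-L6-v2

    asyncio.run(main())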
@@ -0,0 +1,8 @@
+ from .mock import MockLLM
+ from .rule import RuleLLM
+
+ try:
+     from .openai import OpenAILLM
+     __all__ = ["MockLLM", "RuleLLM", "OpenAILLM"]
+ except ImportError:
+     __all__ = ["MockLLM", "RuleLLM"]
@@ -0,0 +1,34 @@
+ from __future__ import annotations
+
+ import asyncio
+ from typing import AsyncGenerator, Dict, List, Optional
+
+ from loom.interfaces.llm import BaseLLM
+
+
+ class MockLLM(BaseLLM):
+     """Mock LLM for tests and examples."""
+
+     def __init__(self, responses: Optional[List[str]] = None, name: str = "mock-llm") -> None:
+         self._responses = list(responses or ["OK"])
+         self._model_name = name
+
+     @property
+     def model_name(self) -> str:
+         return self._model_name
+
+     async def generate(self, messages: List[Dict]) -> str:
+         if self._responses:
+             return self._responses.pop(0)
+         return "".join(m.get("content", "") for m in messages if m.get("role") == "user")
+
+     async def stream(self, messages: List[Dict]) -> AsyncGenerator[str, None]:
+         text = await self.generate(messages)
+         # Naive character-by-character streaming
+         for ch in text:
+             await asyncio.sleep(0)  # yield control to the event loop
+             yield ch
+
+     async def generate_with_tools(self, messages: List[Dict], tools: List[Dict]) -> Dict:
+         return {"content": await self.generate(messages), "tool_calls": []}
+
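
A quick sketch of how the scripted responses behave, using only the API shown above: each call pops the next canned reply, and stream() reuses generate() under the hood.

    import asyncio

    from loom.builtin.llms import MockLLM

    async def main() -> None:
        llm = MockLLM(responses=["first", "second"])
        print(await llm.generate([{"role": "user", "content": "hi"}]))  # "first"
        chunks = [ch async for ch in llm.stream([{"role": "user", "content": "hi"}])]
        print("".join(chunks))  # "second"

    asyncio.run(main())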
@@ -0,0 +1,168 @@
+ """OpenAI LLM implementation with tool calling and streaming support."""
+
+ from __future__ import annotations
+
+ import json
+ from typing import Any, AsyncGenerator, Dict, List, Optional
+
+ from loom.interfaces.llm import BaseLLM
+
+ try:
+     from openai import AsyncOpenAI
+ except ImportError:
+     AsyncOpenAI = None  # type: ignore
+
+
+ class OpenAILLM(BaseLLM):
+     """OpenAI LLM implementation supporting gpt-4, gpt-3.5-turbo, and similar models."""
+
+     def __init__(
+         self,
+         api_key: str,
+         model: str = "gpt-4",
+         base_url: Optional[str] = None,
+         temperature: float = 0.7,
+         max_tokens: Optional[int] = None,
+         **kwargs: Any,
+     ) -> None:
+         if AsyncOpenAI is None:
+             raise ImportError("Please install the openai package: pip install openai")
+
+         self.client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+         self._model = model
+         self.temperature = temperature
+         self.max_tokens = max_tokens
+         self.kwargs = kwargs
+
+     @property
+     def model_name(self) -> str:
+         return self._model
+
+     @property
+     def supports_tools(self) -> bool:
+         # Both GPT-4 and GPT-3.5-turbo support tool calling
+         return "gpt-4" in self._model.lower() or "gpt-3.5-turbo" in self._model.lower()
+
+     async def generate(self, messages: List[Dict]) -> str:
+         """Non-streaming generation."""
+         params = {
+             "model": self._model,
+             "messages": messages,
+             "temperature": self.temperature,
+         }
+         if self.max_tokens:
+             params["max_tokens"] = self.max_tokens
+
+         response = await self.client.chat.completions.create(**params)
+         return response.choices[0].message.content or ""
+
+     async def stream(self, messages: List[Dict]) -> AsyncGenerator[str, None]:
+         """Streaming generation."""
+         params = {
+             "model": self._model,
+             "messages": messages,
+             "temperature": self.temperature,
+             "stream": True,
+         }
+         if self.max_tokens:
+             params["max_tokens"] = self.max_tokens
+
+         stream = await self.client.chat.completions.create(**params)
+         async for chunk in stream:
+             content = chunk.choices[0].delta.content
+             if content:
+                 yield content
+
+     async def generate_with_tools(self, messages: List[Dict], tools: List[Dict]) -> Dict:
+         """Generation with tool calling."""
+         params = {
+             "model": self._model,
+             "messages": messages,
+             "tools": tools,
+             "temperature": self.temperature,
+         }
+         if self.max_tokens:
+             params["max_tokens"] = self.max_tokens
+
+         response = await self.client.chat.completions.create(**params)
+         message = response.choices[0].message
+
+         # Parse tool calls
+         tool_calls = []
+         if message.tool_calls:
+             for tc in message.tool_calls:
+                 tool_calls.append(
+                     {
+                         "id": tc.id,
+                         "name": tc.function.name,
+                         "arguments": json.loads(tc.function.arguments) if tc.function.arguments else {},
+                     }
+                 )
+
+         return {
+             "content": message.content or "",
+             "tool_calls": tool_calls if tool_calls else None,
+         }
+
+     async def stream_with_tools(
+         self, messages: List[Dict], tools: List[Dict]
+     ) -> AsyncGenerator[Dict[str, Any], None]:
+         """Streaming tool calls (advanced)."""
+         params = {
+             "model": self._model,
+             "messages": messages,
+             "tools": tools,
+             "temperature": self.temperature,
+             "stream": True,
+         }
+         if self.max_tokens:
+             params["max_tokens"] = self.max_tokens
+
+         stream = await self.client.chat.completions.create(**params)
+
+         # Accumulate tool-call fragments across chunks
+         accumulated_tool_calls: Dict[int, Dict] = {}
+
+         async for chunk in stream:
+             delta = chunk.choices[0].delta
+
+             # Text delta
+             if delta.content:
+                 yield {"type": "text_delta", "content": delta.content}
+
+             # Tool-call delta
+             if delta.tool_calls:
+                 for tc_delta in delta.tool_calls:
+                     idx = tc_delta.index
+                     if idx not in accumulated_tool_calls:
+                         accumulated_tool_calls[idx] = {
+                             "id": tc_delta.id or "",
+                             "name": "",
+                             "arguments": "",
+                         }
+
+                     if tc_delta.id:
+                         accumulated_tool_calls[idx]["id"] = tc_delta.id
+                     if tc_delta.function.name:
+                         accumulated_tool_calls[idx]["name"] = tc_delta.function.name
+                     if tc_delta.function.arguments:
+                         accumulated_tool_calls[idx]["arguments"] += tc_delta.function.arguments
+
+         # After the stream ends, emit the complete tool calls
+         if accumulated_tool_calls:
+             tool_calls = []
+             for tc_data in accumulated_tool_calls.values():
+                 try:
+                     arguments = json.loads(tc_data["arguments"]) if tc_data["arguments"] else {}
+                 except json.JSONDecodeError:
+                     arguments = {}
+
+                 tool_calls.append(
+                     {
+                         "id": tc_data["id"],
+                         "name": tc_data["name"],
+                         "arguments": arguments,
+                     }
+                 )
+
+             yield {"type": "tool_calls", "tool_calls": tool_calls}
@@ -0,0 +1,102 @@
+ from __future__ import annotations
+
+ import asyncio
+ import re
+ from typing import AsyncGenerator, Dict, List
+
+ from loom.interfaces.llm import BaseLLM
+
+
+ class RuleLLM(BaseLLM):
+     """Rule-driven demo LLM:
+     - If the user message contains a "calc:" prefix, emit a calculator tool call.
+     - Otherwise return plain text.
+     """
+
+     def __init__(self, model: str = "rule-llm") -> None:
+         self._model_name = model
+
+     @property
+     def model_name(self) -> str:
+         return self._model_name
+
+     @property
+     def supports_tools(self) -> bool:
+         return True
+
+     async def generate(self, messages: List[Dict]) -> str:
+         last = messages[-1]["content"] if messages else ""
+         return f"Echo: {last}"
+
+     async def stream(self, messages: List[Dict]) -> AsyncGenerator[str, None]:
+         text = await self.generate(messages)
+         for ch in text:
+             await asyncio.sleep(0)
+             yield ch
+
+     async def generate_with_tools(self, messages: List[Dict], tools: List[Dict]) -> Dict:
+         if not messages:
+             return {"content": "", "tool_calls": []}
+
+         last_msg = messages[-1]
+         last_role = last_msg.get("role")
+         last = last_msg.get("content", "")
+
+         # If the previous message is a tool result, summarize it and stop calling tools
+         if last_role == "tool":
+             return {"content": f"Tool finished, output: {last}", "tool_calls": []}
+
+         # Simple rules: calc/read/write/glob/grep
+         m = re.search(r"calc:\s*([0-9+\-*/. ()]+)", last, re.IGNORECASE)
+         if m:
+             expr = m.group(1).strip()
+             return {
+                 "content": "I will evaluate the expression with the calculator.",
+                 "tool_calls": [
+                     {"id": "tool_1", "name": "calculator", "arguments": {"expression": expr}}
+                 ],
+             }
+
+         m = re.search(r"read:\s*(.+)$", last, re.IGNORECASE)
+         if m:
+             path = m.group(1).strip()
+             return {
+                 "content": "I will read the requested file.",
+                 "tool_calls": [{"id": "tool_1", "name": "read_file", "arguments": {"path": path}}],
+             }
+
+         m = re.search(r"write:\s*([^<]+)<<<(.*)$", last, re.IGNORECASE | re.DOTALL)
+         if m:
+             path = m.group(1).strip()
+             content = m.group(2).strip()
+             return {
+                 "content": "I will write the requested file.",
+                 "tool_calls": [
+                     {"id": "tool_1", "name": "write_file", "arguments": {"path": path, "content": content}}
+                 ],
+             }
+
+         m = re.search(r"glob:\s*(.+)$", last, re.IGNORECASE)
+         if m:
+             pattern = m.group(1).strip()
+             return {
+                 "content": "I will search for matching files.",
+                 "tool_calls": [{"id": "tool_1", "name": "glob", "arguments": {"pattern": pattern}}],
+             }
+
+         m = re.search(r"grep:\s*(.+?)\s+in\s+(.+)$", last, re.IGNORECASE)
+         if m:
+             pattern = m.group(1).strip()
+             target = m.group(2).strip()
+             args = {"pattern": pattern}
+             if target.startswith("glob:"):
+                 args["glob_pattern"] = target.split(":", 1)[1].strip()
+             else:
+                 args["path"] = target
+             return {
+                 "content": "I will search for text matches.",
+                 "tool_calls": [{"id": "tool_1", "name": "grep", "arguments": args}],
+             }
+
+         # Default: no tools
+         return {"content": await self.generate(messages), "tool_calls": []}
@@ -0,0 +1,5 @@
+ from .in_memory import InMemoryMemory
+ from .persistent_memory import PersistentMemory
+
+ __all__ = ["InMemoryMemory", "PersistentMemory"]
+
@@ -0,0 +1,21 @@
+ from __future__ import annotations
+
+ from typing import List, Optional
+
+ from loom.core.types import Message
+ from loom.interfaces.memory import BaseMemory
+
+
+ class InMemoryMemory(BaseMemory):
+     def __init__(self) -> None:
+         self._messages: List[Message] = []
+
+     async def add_message(self, message: Message) -> None:
+         self._messages.append(message)
+
+     async def get_messages(self, limit: Optional[int] = None) -> List[Message]:
+         return self._messages[-limit:] if limit else list(self._messages)
+
+     async def clear(self) -> None:
+         self._messages.clear()
+
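
A usage sketch for InMemoryMemory; it assumes Message (defined in loom/core/types.py, not shown in this excerpt) accepts role/content keyword arguments:

    import asyncio

    from loom.builtin.memory import InMemoryMemory
    from loom.core.types import Message

    async def main() -> None:
        memory = InMemoryMemory()
        # Assumption: Message takes role/content kwargs; see loom/core/types.py
        await memory.add_message(Message(role="user", content="hello"))
        print(len(await memory.get_messages(limit=10)))  # 1
        await memory.clear()

    asyncio.run(main())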