loom-agent 0.0.1-py3-none-any.whl → 0.0.3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of loom-agent has been flagged as a potentially problematic release.

Files changed (39)
  1. loom/builtin/tools/calculator.py +4 -0
  2. loom/builtin/tools/document_search.py +5 -0
  3. loom/builtin/tools/glob.py +4 -0
  4. loom/builtin/tools/grep.py +4 -0
  5. loom/builtin/tools/http_request.py +5 -0
  6. loom/builtin/tools/python_repl.py +5 -0
  7. loom/builtin/tools/read_file.py +4 -0
  8. loom/builtin/tools/task.py +105 -0
  9. loom/builtin/tools/web_search.py +4 -0
  10. loom/builtin/tools/write_file.py +4 -0
  11. loom/components/agent.py +121 -5
  12. loom/core/agent_executor.py +777 -321
  13. loom/core/compression_manager.py +17 -10
  14. loom/core/context_assembly.py +437 -0
  15. loom/core/events.py +660 -0
  16. loom/core/execution_context.py +119 -0
  17. loom/core/tool_orchestrator.py +383 -0
  18. loom/core/turn_state.py +188 -0
  19. loom/core/types.py +15 -4
  20. loom/core/unified_coordination.py +389 -0
  21. loom/interfaces/event_producer.py +172 -0
  22. loom/interfaces/tool.py +22 -1
  23. loom/security/__init__.py +13 -0
  24. loom/security/models.py +85 -0
  25. loom/security/path_validator.py +128 -0
  26. loom/security/validator.py +346 -0
  27. loom/tasks/PHASE_1_FOUNDATION/task_1.1_agent_events.md +121 -0
  28. loom/tasks/PHASE_1_FOUNDATION/task_1.2_streaming_api.md +521 -0
  29. loom/tasks/PHASE_1_FOUNDATION/task_1.3_context_assembler.md +606 -0
  30. loom/tasks/PHASE_2_CORE_FEATURES/task_2.1_tool_orchestrator.md +743 -0
  31. loom/tasks/PHASE_2_CORE_FEATURES/task_2.2_security_validator.md +676 -0
  32. loom/tasks/README.md +109 -0
  33. loom/tasks/__init__.py +11 -0
  34. loom/tasks/sql_placeholder.py +100 -0
  35. loom_agent-0.0.3.dist-info/METADATA +292 -0
  36. {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/RECORD +38 -19
  37. loom_agent-0.0.1.dist-info/METADATA +0 -457
  38. {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/WHEEL +0 -0
  39. {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/licenses/LICENSE +0 -0
loom/core/types.py CHANGED
@@ -68,15 +68,26 @@ class MessageQueueItem(BaseModel):
 
 class CompressionMetadata(BaseModel):
     """Metadata for AU2 8-segment compression (US2)."""
 
-    original_message_count: int = Field(..., ge=1, description="Messages before compression")
-    compressed_message_count: int = Field(..., ge=1, description="Messages after compression")
-    original_token_count: int = Field(..., ge=1, description="Tokens before compression")
-    compressed_token_count: int = Field(..., ge=1, description="Tokens after compression")
+    original_message_count: int = Field(..., ge=0, description="Messages before compression")
+    compressed_message_count: int = Field(..., ge=0, description="Messages after compression")
+    original_token_count: int = Field(..., ge=0, description="Tokens before compression")
+    compressed_token_count: int = Field(..., ge=0, description="Tokens after compression")
     compression_ratio: float = Field(..., ge=0.0, le=1.0, description="Reduction ratio (0.75 = 75%)")
     key_topics: List[str] = Field(default_factory=list, description="Extracted key topics")
     compression_method: str = Field(default="au2_8segment", description="Algorithm used")
     timestamp: datetime = Field(default_factory=datetime.utcnow, description="Compression time")
 
+    # Backward compatibility aliases
+    @property
+    def original_tokens(self) -> int:
+        """Backward compatibility alias for original_token_count."""
+        return self.original_token_count
+
+    @property
+    def compressed_tokens(self) -> int:
+        """Backward compatibility alias for compressed_token_count."""
+        return self.compressed_token_count
+
 
 # -----------------------------------------------------------------------------
 # T013: SubAgentContext - US3 Sub-Agent Isolation
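The relaxed `ge=0` bounds let empty or fully-compressed histories validate, and the two properties keep older call sites that used `original_tokens`/`compressed_tokens` working. A minimal usage sketch (the field values are illustrative, assuming the model validates exactly as shown in the diff):

```python
# Illustrative values; the aliases simply mirror the renamed fields.
from loom.core.types import CompressionMetadata

meta = CompressionMetadata(
    original_message_count=40,
    compressed_message_count=8,
    original_token_count=12000,
    compressed_token_count=3000,
    compression_ratio=0.75,  # 75% reduction
)
assert meta.original_tokens == 12000     # alias for original_token_count
assert meta.compressed_tokens == 3000    # alias for compressed_token_count
```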
loom/core/unified_coordination.py ADDED
@@ -0,0 +1,389 @@
+"""
+Unified execution context - the coordination hub for the four core capabilities of the Loom framework.
+
+Enables intelligent context to organize complex tasks within TT recursion.
+"""
+
+from dataclasses import dataclass, field
+from typing import Optional, Dict, Any, List, TYPE_CHECKING
+from pathlib import Path
+import time
+import uuid
+
+from loom.core.context_assembly import ContextAssembler, ComponentPriority
+from loom.builtin.tools.task import TaskTool
+from loom.core.events import EventProcessor, EventFilter, AgentEventType
+
+# Avoid circular imports
+if TYPE_CHECKING:
+    from loom.core.agent_executor import TaskHandler
+
+
+@dataclass
+class CoordinationConfig:
+    """
+    Unified coordination configuration - manages all coordination parameters.
+
+    Centralizes magic numbers for easier tuning and testing.
+    """
+    # Task analysis thresholds
+    deep_recursion_threshold: int = 3
+    """Deep recursion threshold - beyond this depth, the context strategy is adjusted."""
+
+    high_complexity_threshold: float = 0.7
+    """High complexity threshold - above this value, sub-agents are enabled."""
+
+    completion_score_threshold: float = 0.8
+    """Task completion threshold - above this value, the task is considered completable."""
+
+    # Cache configuration
+    context_cache_size: int = 100
+    """Context component cache size."""
+
+    subagent_pool_size: int = 5
+    """Sub-agent pool size."""
+
+    # Event processing configuration
+    event_batch_size: int = 10
+    """Event batch size."""
+
+    event_batch_timeout: float = 0.05
+    """Event batch timeout in seconds - kept low to reduce latency."""
+
+    # Performance targets
+    max_execution_time: float = 30.0
+    """Maximum execution time in seconds."""
+
+    max_token_usage: float = 0.8
+    """Maximum token usage ratio."""
+
+    min_cache_hit_rate: float = 0.6
+    """Minimum cache hit rate."""
+
+    max_subagent_count: int = 3
+    """Maximum number of sub-agents."""
+
+
+@dataclass
+class UnifiedExecutionContext:
+    """
+    Unified execution context that coordinates the four core capabilities.
+
+    Goals:
+    1. Intelligent context organizes complex tasks within TT recursion
+    2. Dynamic strategy and resource-allocation adjustment
+    3. Unified state management and performance optimization
+    4. Cross-component coordination and communication
+    """
+
+    # Basic information
+    execution_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+    correlation_id: Optional[str] = None
+    working_dir: Optional[Path] = None
+
+    # Coordination configuration
+    config: CoordinationConfig = field(default_factory=CoordinationConfig)
+
+    # Instances of the four core capabilities
+    context_assembler: Optional[ContextAssembler] = None
+    task_tool: Optional[TaskTool] = None
+    event_processor: Optional[EventProcessor] = None
+    task_handlers: List['TaskHandler'] = field(default_factory=list)
+
+    # Unified state management
+    execution_state: Dict[str, Any] = field(default_factory=dict)
+    performance_metrics: Dict[str, Any] = field(default_factory=dict)
+
+    # Runtime state removed - those fields are managed by IntelligentCoordinator
+
+
+class IntelligentCoordinator:
+    """
+    Intelligent coordinator - unified management of the four core capabilities.
+
+    Core functions:
+    1. Coordinates the integration of ContextAssembler with TT recursion
+    2. Manages TaskTool's sub-agent execution strategy
+    3. Optimizes EventProcessor event handling
+    4. Dynamically adjusts TaskHandler processing strategies
+    """
+
+    def __init__(self, context: UnifiedExecutionContext):
+        self.context = context
+        self.config = context.config
+
+        # Runtime state
+        self.current_task_type: Optional[str] = None
+        self.task_complexity_score: float = 0.0
+        self.recursion_context: Dict[str, Any] = {}  # quick-access configuration
+        self._strategy_history: List[Dict[str, Any]] = []
+
+    # Magic attribute injection removed - handled directly inside the coordinator
+
+    def coordinate_tt_recursion(self,
+                                messages: List,
+                                turn_state,
+                                context) -> List:
+        """Coordinate TT recursive execution."""
+
+        # 1. Analyze task type and complexity
+        self._analyze_task_context(messages, turn_state)
+
+        # 2. Intelligent context assembly
+        assembled_context = self._intelligent_context_assembly(
+            messages, turn_state
+        )
+
+        # 3. Dynamic task handling strategy
+        task_strategy = self._determine_task_strategy(messages, turn_state)
+
+        # 4. Coordinated execution plan
+        execution_plan = self._create_execution_plan(
+            assembled_context, task_strategy
+        )
+
+        return execution_plan
+
+    def _analyze_task_context(self, messages: List, turn_state):
+        """Analyze the task context."""
+        if not messages:
+            return
+
+        # Analyze task type
+        task_content = messages[0].content if messages else ""
+        self.current_task_type = self._classify_task_type(task_content)
+
+        # Compute task complexity
+        self.task_complexity_score = self._calculate_task_complexity(
+            task_content, turn_state
+        )
+
+        # Update recursion context
+        self.recursion_context.update({
+            "turn_counter": turn_state.turn_counter,
+            "max_iterations": turn_state.max_iterations,
+            "task_type": self.current_task_type,
+            "complexity": self.task_complexity_score
+        })
+
+    def _classify_task_type(self, task_content: str) -> str:
+        """Classify the task type."""
+        task_lower = task_content.lower()
+
+        if any(keyword in task_lower for keyword in ["analyze", "analysis", "分析"]):
+            return "analysis"
+        elif any(keyword in task_lower for keyword in ["generate", "create", "生成", "创建"]):
+            return "generation"
+        elif any(keyword in task_lower for keyword in ["sql", "query", "查询"]):
+            return "sql"
+        elif any(keyword in task_lower for keyword in ["test", "testing", "测试"]):
+            return "testing"
+        elif any(keyword in task_lower for keyword in ["report", "报告"]):
+            return "reporting"
+        else:
+            return "general"
+
+    def _calculate_task_complexity(self, task_content: str, turn_state) -> float:
+        """Compute the task complexity score."""
+        complexity = 0.0
+
+        # Based on content length
+        complexity += min(len(task_content) / 1000, 0.3)
+
+        # Based on keyword count
+        complex_keywords = ["complex", "multiple", "several", "various",
+                            "复杂", "多个", "各种", "多种"]
+        keyword_count = sum(1 for kw in complex_keywords if kw in task_content.lower())
+        complexity += min(keyword_count * 0.1, 0.3)
+
+        # Based on recursion depth
+        recursion_factor = turn_state.turn_counter / turn_state.max_iterations
+        complexity += recursion_factor * 0.4
+
+        return min(complexity, 1.0)
+
+    def _intelligent_context_assembly(self, messages: List, turn_state) -> str:
+        """Intelligent context assembly."""
+        if not self.context.context_assembler:
+            return ""
+
+        assembler = self.context.context_assembler
+
+        # Adjust strategy based on task type
+        task_type = self.current_task_type
+        complexity = self.task_complexity_score
+
+        if task_type == "analysis" and complexity > self.config.high_complexity_threshold:
+            # Complex analysis tasks need more examples and guidance
+            assembler.adjust_priority("examples", ComponentPriority.MEDIUM)
+            assembler.adjust_priority("analysis_guidelines", ComponentPriority.HIGH)
+
+        elif task_type == "sql" and complexity > 0.5:
+            # SQL tasks need schema information and query examples
+            assembler.adjust_priority("schema_info", ComponentPriority.HIGH)
+            assembler.adjust_priority("query_examples", ComponentPriority.MEDIUM)
+
+        # Adjust priorities based on recursion depth
+        recursion_depth = turn_state.turn_counter
+        if recursion_depth > self.config.deep_recursion_threshold:
+            # Under deep recursion, keep the core instructions first
+            assembler.adjust_priority("base_instructions", ComponentPriority.CRITICAL)
+            assembler.adjust_priority("tool_definitions", ComponentPriority.HIGH)
+
+            # Lower example priority to save space
+            assembler.adjust_priority("examples", ComponentPriority.LOW)
+
+        return assembler.assemble()
+
+    def _determine_task_strategy(self, messages: List, turn_state) -> Dict[str, Any]:
+        """Determine the task handling strategy."""
+        strategy = {
+            "use_sub_agents": False,
+            "parallel_execution": False,
+            "context_priority": "balanced",
+            "event_batching": True,
+            "subagent_types": [],
+            "context_focus": []
+        }
+
+        task_type = self.current_task_type
+        complexity = self.task_complexity_score
+
+        # Decide whether to use sub-agents based on task complexity
+        if complexity > self.config.high_complexity_threshold:
+            strategy["use_sub_agents"] = True
+            strategy["parallel_execution"] = True
+
+            # Choose sub-agent types based on task type
+            if task_type == "analysis":
+                strategy["subagent_types"] = ["code-analyzer", "quality-checker"]
+            elif task_type == "sql":
+                strategy["subagent_types"] = ["sql-expert", "data-analyzer"]
+            elif task_type == "reporting":
+                strategy["subagent_types"] = ["report-writer", "data-processor"]
+
+        # Adjust strategy based on recursion depth
+        if turn_state.turn_counter > self.config.deep_recursion_threshold:
+            strategy["context_priority"] = "minimal"
+            strategy["event_batching"] = True
+            strategy["context_focus"] = ["base_instructions", "tool_definitions"]
+
+        return strategy
+
+    def _create_execution_plan(self,
+                               assembled_context: str,
+                               task_strategy: Dict[str, Any]) -> Dict[str, Any]:
+        """Create the execution plan."""
+        return {
+            "context": assembled_context,
+            "strategy": task_strategy,
+            "coordinator_config": {
+                "task_type": self.current_task_type,
+                "complexity": self.task_complexity_score,
+                "recursion_context": self.recursion_context
+            },
+            "performance_targets": self._get_performance_targets(task_strategy)
+        }
+
+    def _get_performance_targets(self, strategy: Dict[str, Any]) -> Dict[str, Any]:
+        """Get the performance targets."""
+        targets = {
+            "max_execution_time": self.config.max_execution_time,
+            "max_token_usage": self.config.max_token_usage,
+            "min_cache_hit_rate": self.config.min_cache_hit_rate,
+            "max_subagent_count": self.config.max_subagent_count
+        }
+
+        if strategy["use_sub_agents"]:
+            targets["max_execution_time"] = self.config.max_execution_time * 2
+            targets["max_subagent_count"] = len(strategy["subagent_types"])
+
+        return targets
+
+    def get_unified_metrics(self) -> Dict[str, Any]:
+        """Get unified performance metrics."""
+        metrics = {
+            "execution_id": self.context.execution_id,
+            "timestamp": time.time(),
+            "task_analysis": {
+                "task_type": self.current_task_type,
+                "complexity_score": self.task_complexity_score,
+                "recursion_context": self.recursion_context
+            }
+        }
+
+        # Collect per-component metrics
+        if self.context.context_assembler:
+            metrics["context_assembler"] = self.context.context_assembler.get_component_stats()
+
+        if self.context.task_tool:
+            metrics["task_tool"] = self.context.task_tool.get_pool_stats()
+
+        if self.context.event_processor:
+            metrics["event_processor"] = self.context.event_processor.get_stats()
+
+        return metrics
+
+    def adjust_strategy_based_on_performance(self,
+                                             current_metrics: Dict[str, Any]):
+        """Dynamically adjust strategy based on performance metrics."""
+
+        # Analyze performance bottlenecks
+        bottlenecks = self._identify_bottlenecks(current_metrics)
+
+        adjustments_made = []
+
+        for bottleneck in bottlenecks:
+            if bottleneck == "context_assembly_slow":
+                # Adjust context assembly strategy
+                if self.context.context_assembler:
+                    self.context.context_assembler.enable_caching = True
+                    self.context.context_assembler._cache_size = self.config.context_cache_size * 2
+                    adjustments_made.append("increased context cache size")
+
+            elif bottleneck == "sub_agent_creation_overhead":
+                # Adjust sub-agent pool strategy
+                if self.context.task_tool:
+                    self.context.task_tool.pool_size = self.config.subagent_pool_size * 2
+                    self.context.task_tool.enable_pooling = True
+                    adjustments_made.append("increased sub-agent pool size")
+
+            elif bottleneck == "event_processing_latency":
+                # Adjust event processing strategy
+                if self.context.event_processor:
+                    for filter_obj in self.context.event_processor.filters:
+                        filter_obj.batch_size = self.config.event_batch_size * 2
+                        filter_obj.batch_timeout = self.config.event_batch_timeout / 2
+                    adjustments_made.append("tuned event batching settings")
+
+        # Record strategy adjustment history
+        self._strategy_history.append({
+            "timestamp": time.time(),
+            "bottlenecks": bottlenecks,
+            "adjustments": adjustments_made
+        })
+
+    def _identify_bottlenecks(self, metrics: Dict[str, Any]) -> List[str]:
+        """Identify performance bottlenecks."""
+        bottlenecks = []
+
+        # Check context assembly performance
+        if "context_assembler" in metrics:
+            ca_metrics = metrics["context_assembler"]
+            if ca_metrics.get("budget_utilization", 0) > 0.9:
+                bottlenecks.append("context_assembly_slow")
+
+        # Check sub-agent performance
+        if "task_tool" in metrics:
+            tt_metrics = metrics["task_tool"]
+            if tt_metrics.get("cache_hit_rate", 0) < 0.3:
+                bottlenecks.append("sub_agent_creation_overhead")
+
+        # Check event processing performance
+        if "event_processor" in metrics:
+            ep_metrics = metrics["event_processor"]
+            if ep_metrics.get("average_processing_time", 0) > 0.1:
+                bottlenecks.append("event_processing_latency")
+
+        return bottlenecks
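Taken together, `coordinate_tt_recursion` runs a fixed four-step pipeline: analyze, assemble, strategize, plan. A minimal wiring sketch, assuming only what the diff shows (the `SimpleMessage`/`SimpleTurnState` dataclasses are hypothetical stand-ins for the framework's real message and `TurnState` types; only the attributes the coordinator actually reads are stubbed):

```python
# Hypothetical stand-ins: the real types live in loom.core.
from dataclasses import dataclass
from loom.core.unified_coordination import (
    IntelligentCoordinator,
    UnifiedExecutionContext,
)

@dataclass
class SimpleMessage:
    content: str

@dataclass
class SimpleTurnState:
    turn_counter: int = 1
    max_iterations: int = 10

ctx = UnifiedExecutionContext()  # no assembler/tool/processor attached
coordinator = IntelligentCoordinator(ctx)

plan = coordinator.coordinate_tt_recursion(
    messages=[SimpleMessage("Analyze multiple complex SQL queries")],
    turn_state=SimpleTurnState(),
    context=None,
)
print(plan["coordinator_config"]["task_type"])  # "analysis" ("analyze" matches first)
print(plan["strategy"]["use_sub_agents"])       # False here; flips above the 0.7 threshold
```

With no `ContextAssembler` attached, `plan["context"]` is simply the empty string; the coordinator degrades gracefully rather than failing.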
loom/interfaces/event_producer.py ADDED
@@ -0,0 +1,172 @@
+"""
+Event Producer Protocol for Loom 2.0
+
+Defines the interface that all event-producing components must implement.
+This enables type-safe composition of streaming agents.
+"""
+
+from typing import Protocol, AsyncGenerator, runtime_checkable
+from loom.core.events import AgentEvent
+
+
+@runtime_checkable
+class EventProducer(Protocol):
+    """
+    Protocol for components that produce AgentEvent streams.
+
+    Any component that participates in the agent execution pipeline
+    and produces events should implement this protocol.
+
+    Example:
+        ```python
+        class MyCustomExecutor(EventProducer):
+            async def produce_events(self) -> AsyncGenerator[AgentEvent, None]:
+                yield AgentEvent.phase_start("custom_phase")
+                # ... do work
+                yield AgentEvent.phase_end("custom_phase")
+        ```
+    """
+
+    async def produce_events(self) -> AsyncGenerator[AgentEvent, None]:
+        """
+        Produce a stream of agent events.
+
+        Yields:
+            AgentEvent: Events representing execution progress
+
+        Example:
+            ```python
+            async for event in producer.produce_events():
+                if event.type == AgentEventType.LLM_DELTA:
+                    print(event.content, end="")
+            ```
+        """
+        ...
+
+
+@runtime_checkable
+class ToolExecutor(Protocol):
+    """
+    Protocol for tool execution components that produce events.
+
+    This is a specialized EventProducer for tool execution.
+    """
+
+    async def execute_tool(
+        self,
+        tool_name: str,
+        arguments: dict
+    ) -> AsyncGenerator[AgentEvent, None]:
+        """
+        Execute a tool and yield progress events.
+
+        Args:
+            tool_name: Name of the tool to execute
+            arguments: Tool arguments
+
+        Yields:
+            AgentEvent: Tool execution events (TOOL_EXECUTION_START,
+            TOOL_PROGRESS, TOOL_RESULT, or TOOL_ERROR)
+        """
+        ...
+
+
+@runtime_checkable
+class LLMEventProducer(Protocol):
+    """
+    Protocol for LLM components that produce streaming events.
+
+    This enables streaming LLM calls with real-time token generation.
+    """
+
+    async def stream_with_events(
+        self,
+        messages: list,
+        tools: list = None
+    ) -> AsyncGenerator[AgentEvent, None]:
+        """
+        Stream LLM generation as AgentEvents.
+
+        Args:
+            messages: Conversation messages
+            tools: Optional tool definitions
+
+        Yields:
+            AgentEvent: LLM events (LLM_START, LLM_DELTA, LLM_COMPLETE,
+            LLM_TOOL_CALLS)
+        """
+        ...
+
+
+# ===== Helper Functions =====
+
+async def merge_event_streams(
+    *producers: EventProducer
+) -> AsyncGenerator[AgentEvent, None]:
+    """
+    Merge multiple event streams into a single stream.
+
+    This is useful for parallel execution where multiple components
+    produce events concurrently.
+
+    Args:
+        *producers: EventProducer instances to merge
+
+    Yields:
+        AgentEvent: Events from all producers in arrival order
+
+    Example:
+        ```python
+        async for event in merge_event_streams(executor1, executor2):
+            print(event)
+        ```
+    """
+    import asyncio
+
+    # Create tasks for all producers
+    tasks = [
+        asyncio.create_task(_consume_producer(producer))
+        for producer in producers
+    ]
+
+    # Yield events as they arrive
+    pending = set(tasks)
+    while pending:
+        done, pending = await asyncio.wait(
+            pending,
+            return_when=asyncio.FIRST_COMPLETED
+        )
+
+        for task in done:
+            events = task.result()
+            for event in events:
+                yield event
+
+
+async def _consume_producer(producer: EventProducer) -> list:
+    """Helper to consume a producer into a list"""
+    events = []
+    async for event in producer.produce_events():
+        events.append(event)
+    return events
+
+
+async def collect_events(
+    producer: EventProducer
+) -> list:
+    """
+    Collect all events from a producer into a list.
+
+    Args:
+        producer: EventProducer to consume
+
+    Returns:
+        List of all events produced
+
+    Example:
+        ```python
+        events = await collect_events(my_executor)
+        print(f"Generated {len(events)} events")
+        ```
+    """
+    return await _consume_producer(producer)
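Because the protocols are `runtime_checkable`, any object with a matching `produce_events` method satisfies `EventProducer`; no subclassing is required. A minimal runnable sketch (plain strings stand in for `AgentEvent` instances to keep the demo self-contained; real producers would yield `AgentEvent` objects):

```python
# Sketch: strings stand in for AgentEvent to avoid constructing real events.
import asyncio
from loom.interfaces.event_producer import collect_events, merge_event_streams

class TickProducer:
    """Structurally satisfies EventProducer - no inheritance needed."""
    def __init__(self, name: str, count: int):
        self.name, self.count = name, count

    async def produce_events(self):
        for i in range(self.count):
            await asyncio.sleep(0)  # cede control to the event loop
            yield f"{self.name}:tick-{i}"

async def main():
    print(await collect_events(TickProducer("a", 3)))
    # ['a:tick-0', 'a:tick-1', 'a:tick-2']

    # Note: merge_event_streams drains each producer to completion before
    # yielding its events, so output is batched per producer, not interleaved.
    async for event in merge_event_streams(TickProducer("x", 2), TickProducer("y", 2)):
        print(event)

asyncio.run(main())
```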
loom/interfaces/tool.py CHANGED
@@ -7,12 +7,32 @@ from pydantic import BaseModel
 
 
 class BaseTool(ABC):
-    """Base tool interface."""
+    """
+    Base tool interface.
+
+    Attributes:
+        name: Tool name (unique identifier)
+        description: Tool description for the LLM
+        args_schema: Pydantic model for argument validation
+        is_read_only: Whether the tool only reads data (safe to parallelize) 🆕
+        category: Tool category (general/destructive/network) 🆕
+        requires_confirmation: Whether the tool requires user confirmation 🆕
+    """
 
     name: str
     description: str
     args_schema: type[BaseModel]
 
+    # 🆕 Loom 2.0 - Orchestration attributes
+    is_read_only: bool = False
+    """Whether this tool only reads data (safe to parallelize with other read-only tools)."""
+
+    category: str = "general"
+    """Tool category: 'general', 'destructive', 'network'."""
+
+    requires_confirmation: bool = False
+    """Whether this tool requires explicit user confirmation before execution."""
+
     @abstractmethod
     async def run(self, **kwargs) -> Any:
         raise NotImplementedError
@@ -23,5 +43,6 @@ class BaseTool(ABC):
 
     @property
     def is_concurrency_safe(self) -> bool:
+        """Legacy attribute for backward compatibility."""
         return True
 
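A sketch of how a subclass might declare the new orchestration attributes (the `DeleteFileTool` and its argument schema are illustrative, not tools shipped with the package):

```python
# Hypothetical tool; the attribute declarations are the point here.
from pydantic import BaseModel
from loom.interfaces.tool import BaseTool

class DeleteFileArgs(BaseModel):
    path: str

class DeleteFileTool(BaseTool):
    name = "delete_file"
    description = "Delete the file at the given path."
    args_schema = DeleteFileArgs

    # Loom 2.0 orchestration attributes
    is_read_only = False          # mutates state, so not safe to parallelize
    category = "destructive"      # 'general' | 'destructive' | 'network'
    requires_confirmation = True  # orchestrator should ask the user first

    async def run(self, **kwargs) -> str:
        args = DeleteFileArgs(**kwargs)
        # ... actual deletion elided
        return f"deleted {args.path}"
```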
loom/security/__init__.py ADDED
@@ -0,0 +1,13 @@
+"""Security module for Loom 2.0"""
+
+from loom.security.models import RiskLevel, SecurityDecision, PathSecurityResult
+from loom.security.path_validator import PathSecurityValidator
+from loom.security.validator import SecurityValidator
+
+__all__ = [
+    "RiskLevel",
+    "SecurityDecision",
+    "PathSecurityResult",
+    "PathSecurityValidator",
+    "SecurityValidator",
+]
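Since the package root re-exports every public name, both import styles below resolve to the same objects; the validators' constructors and methods live in the sibling modules, which this diff lists but does not show:

```python
# Equivalent imports after this __init__.py:
from loom.security import SecurityValidator, RiskLevel
from loom.security.validator import SecurityValidator
from loom.security.models import RiskLevel
```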