loom-agent 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of loom-agent might be problematic.

loom/core/events.py CHANGED
@@ -4,6 +4,12 @@ Agent Event System for Loom 2.0
  This module defines the unified event model for streaming agent execution.
  Inspired by Claude Code's event-driven architecture.

+ New features (Loom 0.0.3):
+ - Event filtering and batch processing
+ - Intelligent event aggregation
+ - Performance-optimized event streaming
+ - Event priority management
+
  Example:
  ```python
  agent = Agent(llm=llm, tools=tools)
@@ -412,3 +418,243 @@ class EventCollector:
  if finish_events:
  return finish_events[-1].content
  return None
+
+
+ class EventFilter:
+ """
+ Event filter - provides advanced event filtering and batch-processing capabilities
+
+ New features (Loom 0.0.3):
+ - Intelligent event filtering
+ - Batch event processing
+ - Event aggregation and merging
+ - Performance-optimized event streaming
+ """
+
+ def __init__(self,
+ allowed_types: Optional[List[AgentEventType]] = None,
+ blocked_types: Optional[List[AgentEventType]] = None,
+ enable_batching: bool = True,
+ batch_size: int = 10,
+ batch_timeout: float = 0.1):
+ """
+ Initialize the event filter
+
+ Args:
+ allowed_types: List of allowed event types (None = allow all)
+ blocked_types: List of blocked event types
+ enable_batching: Enable batch processing
+ batch_size: Batch size
+ batch_timeout: Batch timeout in seconds
+ """
+ self.allowed_types = allowed_types
+ self.blocked_types = blocked_types or []
+ self.enable_batching = enable_batching
+ self.batch_size = batch_size
+ self.batch_timeout = batch_timeout
+
+ # Batch-processing state
+ self._batch_buffer: List[AgentEvent] = []
+ self._last_batch_time = time.time()
+
+ def should_include(self, event: AgentEvent) -> bool:
+ """Decide whether an event should be included"""
+ # Check allowed types
+ if self.allowed_types and event.type not in self.allowed_types:
+ return False
+
+ # Check blocked types
+ if event.type in self.blocked_types:
+ return False
+
+ return True
+
+ def process_event(self, event: AgentEvent) -> List[AgentEvent]:
+ """
+ Process a single event; may return a batch of events
+
+ Returns:
+ The list of processed events
+ """
+ if not self.should_include(event):
+ return []
+
+ if not self.enable_batching:
+ return [event]
+
+ # Add to the batch buffer
+ self._batch_buffer.append(event)
+
+ # Check whether the batch should be flushed
+ should_flush = (
+ len(self._batch_buffer) >= self.batch_size or
+ (time.time() - self._last_batch_time) >= self.batch_timeout
+ )
+
+ if should_flush:
+ return self._flush_batch()
+
+ return []
+
+ def _flush_batch(self) -> List[AgentEvent]:
+ """Emit the batched events and clear the buffer"""
+ if not self._batch_buffer:
+ return []
+
+ # Intelligently aggregate events of the same type
+ aggregated_events = self._aggregate_events(self._batch_buffer)
+
+ # Clear the buffer
+ self._batch_buffer.clear()
+ self._last_batch_time = time.time()
+
+ return aggregated_events
+
+ def _aggregate_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
+ """Intelligently aggregate events"""
+ if not events:
+ return []
+
+ # Group by type
+ events_by_type: Dict[AgentEventType, List[AgentEvent]] = {}
+ for event in events:
+ if event.type not in events_by_type:
+ events_by_type[event.type] = []
+ events_by_type[event.type].append(event)
+
+ aggregated = []
+
+ for event_type, type_events in events_by_type.items():
+ if event_type == AgentEventType.LLM_DELTA:
+ # Merge LLM delta events
+ merged_content = "".join(e.content or "" for e in type_events)
+ if merged_content:
+ # Create the merged event
+ merged_event = AgentEvent(
+ type=AgentEventType.LLM_DELTA,
+ content=merged_content,
+ timestamp=type_events[0].timestamp,
+ metadata={
+ "batch_size": len(type_events),
+ "aggregated": True
+ }
+ )
+ aggregated.append(merged_event)
+ else:
+ # Other event types are kept as-is
+ aggregated.extend(type_events)
+
+ return aggregated
+
+ def flush_remaining(self) -> List[AgentEvent]:
+ """Force-flush any remaining events"""
+ return self._flush_batch()
+
+
+ class EventProcessor:
+ """
+ Event processor - provides advanced event-processing capabilities
+
+ New features (Loom 0.0.3):
+ - Event priority management
+ - Intelligent event routing
+ - Event statistics and analysis
+ - Performance monitoring
+ """
+
+ def __init__(self,
+ filters: Optional[List[EventFilter]] = None,
+ enable_stats: bool = True):
+ """
+ Initialize the event processor
+
+ Args:
+ filters: List of event filters
+ enable_stats: Enable statistics collection
+ """
+ self.filters = filters or []
+ self.enable_stats = enable_stats
+
+ # Statistics
+ self._stats = {
+ "total_events": 0,
+ "filtered_events": 0,
+ "batched_events": 0,
+ "events_by_type": {},
+ "processing_times": []
+ }
+
+ def process_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
+ """
+ Process events in batch
+
+ Args:
+ events: Input event list
+
+ Returns:
+ The list of processed events
+ """
+ if not events:
+ return []
+
+ start_time = time.time()
+ processed_events = []
+
+ for event in events:
+ # Update statistics
+ if self.enable_stats:
+ self._stats["total_events"] += 1
+ event_type = event.type.value
+ self._stats["events_by_type"][event_type] = \
+ self._stats["events_by_type"].get(event_type, 0) + 1
+
+ # Apply filters
+ for filter_obj in self.filters:
+ filtered = filter_obj.process_event(event)
+ processed_events.extend(filtered)
+
+ # If there are no filters, pass the event through directly
+ if not self.filters:
+ processed_events.append(event)
+
+ # Force-flush every filter's batch buffer
+ for filter_obj in self.filters:
+ remaining = filter_obj.flush_remaining()
+ processed_events.extend(remaining)
+
+ # Update processing-time statistics
+ if self.enable_stats:
+ processing_time = time.time() - start_time
+ self._stats["processing_times"].append(processing_time)
+ self._stats["filtered_events"] = len(processed_events)
+
+ return processed_events
+
+ def get_stats(self) -> Dict[str, Any]:
+ """Return processing statistics"""
+ if not self.enable_stats:
+ return {}
+
+ avg_processing_time = (
+ sum(self._stats["processing_times"]) / len(self._stats["processing_times"])
+ if self._stats["processing_times"] else 0
+ )
+
+ return {
+ **self._stats,
+ "average_processing_time": avg_processing_time,
+ "filter_efficiency": (
+ self._stats["filtered_events"] / self._stats["total_events"]
+ if self._stats["total_events"] > 0 else 0
+ )
+ }
+
+ def reset_stats(self) -> None:
+ """Reset statistics"""
+ self._stats = {
+ "total_events": 0,
+ "filtered_events": 0,
+ "batched_events": 0,
+ "events_by_type": {},
+ "processing_times": []
+ }
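
For orientation, here is a minimal usage sketch of the EventFilter/EventProcessor classes added above. It is not taken from the package itself; it assumes only that AgentEvent can be constructed with the type/content/timestamp/metadata keywords shown in this diff and that the classes are importable from loom.core.events. The ten-element `events` list is a hypothetical input stream.

```python
# Minimal sketch (assumptions noted above): batch LLM_DELTA events through
# the new EventFilter/EventProcessor classes.
import time

from loom.core.events import (
    AgentEvent,
    AgentEventType,
    EventFilter,
    EventProcessor,
)

# Keep only LLM deltas and merge them in batches of up to 5.
delta_filter = EventFilter(
    allowed_types=[AgentEventType.LLM_DELTA],
    enable_batching=True,
    batch_size=5,
    batch_timeout=0.05,
)
processor = EventProcessor(filters=[delta_filter], enable_stats=True)

# Hypothetical input stream: ten small delta events.
events = [
    AgentEvent(
        type=AgentEventType.LLM_DELTA,
        content=f"chunk-{i} ",
        timestamp=time.time(),
        metadata={},
    )
    for i in range(10)
]

merged = processor.process_events(events)
for event in merged:
    # Aggregated deltas carry {"aggregated": True, "batch_size": N} metadata.
    print(event.content, event.metadata)

# Ratio of emitted events to input events, as computed by get_stats().
print(processor.get_stats()["filter_efficiency"])
```

With batch_size=5, the ten deltas collapse into two merged events, so the reported filter efficiency is 0.2.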
@@ -0,0 +1,389 @@
+ """
+ Unified execution context - the coordination hub for the four core capabilities of the Loom framework
+
+ Enables intelligent context to organize complex tasks within TT recursion
+ """
+
+ from dataclasses import dataclass, field
+ from typing import Optional, Dict, Any, List, TYPE_CHECKING
+ from pathlib import Path
+ import time
+ import uuid
+
+ from loom.core.context_assembly import ContextAssembler, ComponentPriority
+ from loom.builtin.tools.task import TaskTool
+ from loom.core.events import EventProcessor, EventFilter, AgentEventType
+
+ # Avoid circular imports
+ if TYPE_CHECKING:
+ from loom.core.agent_executor import TaskHandler
+
+
+ @dataclass
+ class CoordinationConfig:
+ """
+ Unified coordination config class - manages all coordination parameters
+
+ Centralizes magic numbers so they are easy to tune and test
+ """
+ # Task-analysis thresholds
+ deep_recursion_threshold: int = 3
+ """Deep-recursion threshold - beyond this depth the context strategy is adjusted"""
+
+ high_complexity_threshold: float = 0.7
+ """High-complexity threshold - above this value sub-agents are enabled"""
+
+ completion_score_threshold: float = 0.8
+ """Task-completion threshold - above this value the task is considered completable"""
+
+ # Cache configuration
+ context_cache_size: int = 100
+ """Context component cache size"""
+
+ subagent_pool_size: int = 5
+ """Sub-agent pool size"""
+
+ # Event-processing configuration
+ event_batch_size: int = 10
+ """Event batch size"""
+
+ event_batch_timeout: float = 0.05
+ """Event batch timeout in seconds - kept low to reduce latency"""
+
+ # Performance targets
+ max_execution_time: float = 30.0
+ """Maximum execution time in seconds"""
+
+ max_token_usage: float = 0.8
+ """Maximum token usage ratio"""
+
+ min_cache_hit_rate: float = 0.6
+ """Minimum cache hit rate"""
+
+ max_subagent_count: int = 3
+ """Maximum number of sub-agents"""
+
+
+ @dataclass
+ class UnifiedExecutionContext:
+ """
+ Unified execution context that coordinates the four core capabilities
+
+ Goals:
+ 1. Intelligent context organizes complex tasks within TT recursion
+ 2. Dynamic adjustment of strategy and resource allocation
+ 3. Unified state management and performance optimization
+ 4. Cross-component coordination and communication
+ """
+
+ # Basic information
+ execution_id: str = field(default_factory=lambda: str(uuid.uuid4()))
+ correlation_id: Optional[str] = None
+ working_dir: Optional[Path] = None
+
+ # Coordination configuration
+ config: CoordinationConfig = field(default_factory=CoordinationConfig)
+
+ # Instances of the four core capabilities
+ context_assembler: Optional[ContextAssembler] = None
+ task_tool: Optional[TaskTool] = None
+ event_processor: Optional[EventProcessor] = None
+ task_handlers: List['TaskHandler'] = field(default_factory=list)
+
+ # Unified state management
+ execution_state: Dict[str, Any] = field(default_factory=dict)
+ performance_metrics: Dict[str, Any] = field(default_factory=dict)
+
+ # Runtime state removed - those fields should be managed by IntelligentCoordinator
+
+
+ class IntelligentCoordinator:
+ """
+ Intelligent coordinator - unified management of the four core capabilities
+
+ Core responsibilities:
+ 1. Coordinate the integration of ContextAssembler with TT recursion
+ 2. Manage TaskTool's sub-agent execution strategy
+ 3. Optimize EventProcessor's event handling
+ 4. Dynamically adjust TaskHandler's processing strategy
+ """
+
+ def __init__(self, context: UnifiedExecutionContext):
+ self.context = context
+ self.config = context.config
+
+ # Runtime state
+ self.current_task_type: Optional[str] = None
+ self.task_complexity_score: float = 0.0
+ self.recursion_context: Dict[str, Any] = {}  # shortcut access to configuration
+ self._strategy_history: List[Dict[str, Any]] = []
+
+ # Magic attribute-injection methods removed - handled directly inside the coordinator instead
+
+ def coordinate_tt_recursion(self,
+ messages: List,
+ turn_state,
+ context) -> List:
+ """Coordinate TT recursive execution"""
+
+ # 1. Analyze task type and complexity
+ self._analyze_task_context(messages, turn_state)
+
+ # 2. Intelligent context assembly
+ assembled_context = self._intelligent_context_assembly(
+ messages, turn_state
+ )
+
+ # 3. Dynamic task-handling strategy
+ task_strategy = self._determine_task_strategy(messages, turn_state)
+
+ # 4. Coordinated execution plan
+ execution_plan = self._create_execution_plan(
+ assembled_context, task_strategy
+ )
+
+ return execution_plan
+
+ def _analyze_task_context(self, messages: List, turn_state):
+ """Analyze the task context"""
+ if not messages:
+ return
+
+ # Analyze the task type
+ task_content = messages[0].content if messages else ""
+ self.current_task_type = self._classify_task_type(task_content)
+
+ # Compute task complexity
+ self.task_complexity_score = self._calculate_task_complexity(
+ task_content, turn_state
+ )
+
+ # Update the recursion context
+ self.recursion_context.update({
+ "turn_counter": turn_state.turn_counter,
+ "max_iterations": turn_state.max_iterations,
+ "task_type": self.current_task_type,
+ "complexity": self.task_complexity_score
+ })
+
+ def _classify_task_type(self, task_content: str) -> str:
+ """Classify the task type"""
+ task_lower = task_content.lower()
+
+ if any(keyword in task_lower for keyword in ["analyze", "analysis", "分析"]):
+ return "analysis"
+ elif any(keyword in task_lower for keyword in ["generate", "create", "生成", "创建"]):
+ return "generation"
+ elif any(keyword in task_lower for keyword in ["sql", "query", "查询"]):
+ return "sql"
+ elif any(keyword in task_lower for keyword in ["test", "testing", "测试"]):
+ return "testing"
+ elif any(keyword in task_lower for keyword in ["report", "报告"]):
+ return "reporting"
+ else:
+ return "general"
+
+ def _calculate_task_complexity(self, task_content: str, turn_state) -> float:
+ """Compute task complexity"""
+ complexity = 0.0
+
+ # Based on content length
+ complexity += min(len(task_content) / 1000, 0.3)
+
+ # Based on keyword count
+ complex_keywords = ["complex", "multiple", "several", "various",
+ "复杂", "多个", "各种", "多种"]
+ keyword_count = sum(1 for kw in complex_keywords if kw in task_content.lower())
+ complexity += min(keyword_count * 0.1, 0.3)
+
+ # Based on recursion depth
+ recursion_factor = turn_state.turn_counter / turn_state.max_iterations
+ complexity += recursion_factor * 0.4
+
+ return min(complexity, 1.0)
+
+ def _intelligent_context_assembly(self, messages: List, turn_state) -> str:
+ """Intelligent context assembly"""
+ if not self.context.context_assembler:
+ return ""
+
+ assembler = self.context.context_assembler
+
+ # Adjust strategy based on task type
+ task_type = self.current_task_type
+ complexity = self.task_complexity_score
+
+ if task_type == "analysis" and complexity > self.config.high_complexity_threshold:
+ # Complex analysis tasks need more examples and guidance
+ assembler.adjust_priority("examples", ComponentPriority.MEDIUM)
+ assembler.adjust_priority("analysis_guidelines", ComponentPriority.HIGH)
+
+ elif task_type == "sql" and complexity > 0.5:
+ # SQL tasks need schema information and query examples
+ assembler.adjust_priority("schema_info", ComponentPriority.HIGH)
+ assembler.adjust_priority("query_examples", ComponentPriority.MEDIUM)
+
+ # Adjust priorities based on recursion depth
+ recursion_depth = turn_state.turn_counter
+ if recursion_depth > self.config.deep_recursion_threshold:
+ # In deep recursion, preferentially keep the core instructions
+ assembler.adjust_priority("base_instructions", ComponentPriority.CRITICAL)
+ assembler.adjust_priority("tool_definitions", ComponentPriority.HIGH)
+
+ # Lower the priority of examples to save space
+ assembler.adjust_priority("examples", ComponentPriority.LOW)
+
+ return assembler.assemble()
+
+ def _determine_task_strategy(self, messages: List, turn_state) -> Dict[str, Any]:
+ """Determine the task-handling strategy"""
+ strategy = {
+ "use_sub_agents": False,
+ "parallel_execution": False,
+ "context_priority": "balanced",
+ "event_batching": True,
+ "subagent_types": [],
+ "context_focus": []
+ }
+
+ task_type = self.current_task_type
+ complexity = self.task_complexity_score
+
+ # Decide whether to use sub-agents based on task complexity
+ if complexity > self.config.high_complexity_threshold:
+ strategy["use_sub_agents"] = True
+ strategy["parallel_execution"] = True
+
+ # Choose sub-agent types according to the task type
+ if task_type == "analysis":
+ strategy["subagent_types"] = ["code-analyzer", "quality-checker"]
+ elif task_type == "sql":
+ strategy["subagent_types"] = ["sql-expert", "data-analyzer"]
+ elif task_type == "reporting":
+ strategy["subagent_types"] = ["report-writer", "data-processor"]
+
+ # Adjust strategy based on recursion depth
+ if turn_state.turn_counter > self.config.deep_recursion_threshold:
+ strategy["context_priority"] = "minimal"
+ strategy["event_batching"] = True
+ strategy["context_focus"] = ["base_instructions", "tool_definitions"]
+
+ return strategy
+
+ def _create_execution_plan(self,
+ assembled_context: str,
+ task_strategy: Dict[str, Any]) -> Dict[str, Any]:
+ """Create the execution plan"""
+ return {
+ "context": assembled_context,
+ "strategy": task_strategy,
+ "coordinator_config": {
+ "task_type": self.current_task_type,
+ "complexity": self.task_complexity_score,
+ "recursion_context": self.recursion_context
+ },
+ "performance_targets": self._get_performance_targets(task_strategy)
+ }
+
+ def _get_performance_targets(self, strategy: Dict[str, Any]) -> Dict[str, Any]:
+ """Get performance targets"""
+ targets = {
+ "max_execution_time": self.config.max_execution_time,
+ "max_token_usage": self.config.max_token_usage,
+ "min_cache_hit_rate": self.config.min_cache_hit_rate,
+ "max_subagent_count": self.config.max_subagent_count
+ }
+
+ if strategy["use_sub_agents"]:
+ targets["max_execution_time"] = self.config.max_execution_time * 2
+ targets["max_subagent_count"] = len(strategy["subagent_types"])
+
+ return targets
+
+
+ def get_unified_metrics(self) -> Dict[str, Any]:
+ """Get unified performance metrics"""
+ metrics = {
+ "execution_id": self.context.execution_id,
+ "timestamp": time.time(),
+ "task_analysis": {
+ "task_type": self.current_task_type,
+ "complexity_score": self.task_complexity_score,
+ "recursion_context": self.recursion_context
+ }
+ }
+
+ # Collect metrics from each component
+ if self.context.context_assembler:
+ metrics["context_assembler"] = self.context.context_assembler.get_component_stats()
+
+ if self.context.task_tool:
+ metrics["task_tool"] = self.context.task_tool.get_pool_stats()
+
+ if self.context.event_processor:
+ metrics["event_processor"] = self.context.event_processor.get_stats()
+
+ return metrics
+
+ def adjust_strategy_based_on_performance(self,
+ current_metrics: Dict[str, Any]):
+ """Dynamically adjust strategy based on performance metrics"""
+
+ # Analyze performance bottlenecks
+ bottlenecks = self._identify_bottlenecks(current_metrics)
+
+ adjustments_made = []
+
+ for bottleneck in bottlenecks:
+ if bottleneck == "context_assembly_slow":
+ # Adjust the context-assembly strategy
+ if self.context.context_assembler:
+ self.context.context_assembler.enable_caching = True
+ self.context.context_assembler._cache_size = self.config.context_cache_size * 2
+ adjustments_made.append("增加上下文缓存大小")
+
+ elif bottleneck == "sub_agent_creation_overhead":
+ # Adjust the sub-agent pool strategy
+ if self.context.task_tool:
+ self.context.task_tool.pool_size = self.config.subagent_pool_size * 2
+ self.context.task_tool.enable_pooling = True
+ adjustments_made.append("增加子代理池大小")
+
+ elif bottleneck == "event_processing_latency":
+ # Adjust the event-processing strategy
+ if self.context.event_processor:
+ for filter_obj in self.context.event_processor.filters:
+ filter_obj.batch_size = self.config.event_batch_size * 2
+ filter_obj.batch_timeout = self.config.event_batch_timeout / 2
+ adjustments_made.append("优化事件处理批量设置")
+
+ # Record the strategy-adjustment history
+ self._strategy_history.append({
+ "timestamp": time.time(),
+ "bottlenecks": bottlenecks,
+ "adjustments": adjustments_made
+ })
+
+ def _identify_bottlenecks(self, metrics: Dict[str, Any]) -> List[str]:
+ """Identify performance bottlenecks"""
+ bottlenecks = []
+
+ # Check context-assembly performance
+ if "context_assembler" in metrics:
+ ca_metrics = metrics["context_assembler"]
+ if ca_metrics.get("budget_utilization", 0) > 0.9:
+ bottlenecks.append("context_assembly_slow")
+
+ # Check sub-agent performance
+ if "task_tool" in metrics:
+ tt_metrics = metrics["task_tool"]
+ if tt_metrics.get("cache_hit_rate", 0) < 0.3:
+ bottlenecks.append("sub_agent_creation_overhead")
+
+ # Check event-processing performance
+ if "event_processor" in metrics:
+ ep_metrics = metrics["event_processor"]
+ if ep_metrics.get("average_processing_time", 0) > 0.1:
+ bottlenecks.append("event_processing_latency")
+
+ return bottlenecks
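
To show how these pieces fit together, here is a minimal sketch that exercises CoordinationConfig, UnifiedExecutionContext and IntelligentCoordinator. It is not from the package; because the new file's name is not shown in this diff, it assumes it runs in the same module as the classes above. _StubMessage and _StubTurnState are hypothetical stand-ins for loom's real message and turn-state objects, which only need .content and .turn_counter/.max_iterations here.

```python
# Minimal sketch under the assumptions stated above.
from dataclasses import dataclass


@dataclass
class _StubMessage:            # hypothetical stand-in for a loom message
    content: str


@dataclass
class _StubTurnState:          # hypothetical stand-in for the executor's turn state
    turn_counter: int = 1
    max_iterations: int = 10


config = CoordinationConfig(high_complexity_threshold=0.7,
                            deep_recursion_threshold=3)
ctx = UnifiedExecutionContext(config=config)   # no assembler/tools wired in
coordinator = IntelligentCoordinator(ctx)

messages = [_StubMessage("Analyze the sales data and generate a report")]
plan = coordinator.coordinate_tt_recursion(messages, _StubTurnState(), ctx)

print(plan["strategy"]["use_sub_agents"])      # driven by the complexity score
print(plan["performance_targets"])             # defaults from CoordinationConfig
print(coordinator.get_unified_metrics()["task_analysis"])
```

With no ContextAssembler, TaskTool or EventProcessor attached, the plan's context is empty and only the task-analysis and strategy fields are populated; wiring real components into UnifiedExecutionContext enables the per-component metrics and the bottleneck-driven adjustments shown above.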