loom-agent 0.0.2__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of loom-agent might be problematic.
- loom/builtin/tools/task.py +100 -0
- loom/core/agent_executor.py +310 -39
- loom/core/context_assembly.py +115 -7
- loom/core/events.py +246 -0
- loom/core/unified_coordination.py +389 -0
- {loom_agent-0.0.2.dist-info → loom_agent-0.0.3.dist-info}/METADATA +22 -25
- {loom_agent-0.0.2.dist-info → loom_agent-0.0.3.dist-info}/RECORD +9 -8
- {loom_agent-0.0.2.dist-info → loom_agent-0.0.3.dist-info}/WHEEL +0 -0
- {loom_agent-0.0.2.dist-info → loom_agent-0.0.3.dist-info}/licenses/LICENSE +0 -0
loom/core/context_assembly.py
CHANGED
@@ -10,8 +10,9 @@ overwritten by system prompts.
 
 from __future__ import annotations
 
+import hashlib
 from dataclasses import dataclass
-from typing import Dict, List, Optional, Callable
+from typing import Dict, List, Optional, Callable, Any
 from enum import IntEnum
 
 
@@ -56,6 +57,9 @@ class ContextAssembler:
     - Token budget management
     - Smart truncation of low-priority components
     - Guarantee high-priority component integrity
+    - Component caching for performance
+    - Dynamic priority adjustment
+    - Context reuse optimization
 
     Example:
     ```python
@@ -85,7 +89,9 @@ class ContextAssembler:
         self,
         max_tokens: int = 16000,
         token_counter: Optional[Callable[[str], int]] = None,
-        token_buffer: float = 0.9  # Use 90% of budget for safety
+        token_buffer: float = 0.9,  # Use 90% of budget for safety
+        enable_caching: bool = True,
+        cache_size: int = 100
     ):
         """
         Initialize the context assembler.
@@ -94,10 +100,19 @@
             max_tokens: Maximum token budget
             token_counter: Custom token counting function (defaults to simple estimation)
             token_buffer: Safety buffer ratio (0.9 = use 90% of max_tokens)
+            enable_caching: Enable component caching for performance
+            cache_size: Maximum number of cached components
         """
         self.max_tokens = int(max_tokens * token_buffer)
         self.token_counter = token_counter or self._estimate_tokens
         self.components: List[ContextComponent] = []
+
+        # Performance optimizations
+        self.enable_caching = enable_caching
+        self._component_cache: Dict[str, ContextComponent] = {}
+        self._cache_size = cache_size
+        self._assembly_cache: Optional[str] = None
+        self._last_components_hash: Optional[str] = None
 
     def add_component(
         self,
@@ -127,16 +142,26 @@
             truncatable=truncatable
         )
         self.components.append(component)
+
+        # Update cache if enabled
+        if self.enable_caching:
+            self._component_cache[name] = component
+            # Maintain cache size limit
+            if len(self._component_cache) > self._cache_size:
+                # Remove oldest entries (simple LRU)
+                oldest_key = next(iter(self._component_cache))
+                del self._component_cache[oldest_key]
 
     def assemble(self) -> str:
         """
         Assemble the final context from all components.
 
         Strategy:
-        1.
-        2.
-        3.
-        4.
+        1. Check cache for identical component configuration
+        2. Sort components by priority (descending)
+        3. Add components until budget is reached
+        4. Truncate low-priority components if needed
+        5. Merge all components into final string
 
         Returns:
             Assembled context string
@@ -144,6 +169,13 @@
         if not self.components:
             return ""
 
+        # Check cache if enabled
+        if self.enable_caching:
+            current_hash = self._get_components_hash()
+            if (self._assembly_cache is not None and
+                    self._last_components_hash == current_hash):
+                return self._assembly_cache
+
         # Sort by priority (highest first)
         sorted_components = sorted(
             self.components,
@@ -165,7 +197,83 @@
             header = f"# {component.name.replace('_', ' ').upper()}"
             sections.append(f"{header}\n{component.content}")
 
-        return "\n\n".join(sections)
+        result = "\n\n".join(sections)
+
+        # Update cache if enabled
+        if self.enable_caching:
+            self._assembly_cache = result
+            self._last_components_hash = self._get_components_hash()
+
+        return result
+
+    def _get_components_hash(self) -> str:
+        """
+        Generate hash for current component configuration
+
+        Optimizations:
+        - Use blake2b instead of MD5 (faster)
+        - Update the hasher with bytes directly instead of concatenating strings
+        - Drop the unnecessary sort
+        """
+        # blake2b is faster than MD5 and still a sound digest
+        hasher = hashlib.blake2b(digest_size=16)
+
+        # Feed the hasher directly to avoid string concatenation
+        for comp in self.components:
+            hasher.update(comp.name.encode())
+            hasher.update(str(comp.priority).encode())
+            hasher.update(str(comp.token_count).encode())
+            hasher.update(b'1' if comp.truncatable else b'0')
+
+        return hasher.hexdigest()
+
+    def adjust_priority(self, component_name: str, new_priority: int) -> bool:
+        """
+        Dynamically adjust component priority.
+
+        Args:
+            component_name: Name of the component to adjust
+            new_priority: New priority value
+
+        Returns:
+            True if component was found and adjusted, False otherwise
+        """
+        for component in self.components:
+            if component.name == component_name:
+                component.priority = new_priority
+                # Clear cache since configuration changed
+                if self.enable_caching:
+                    self._assembly_cache = None
+                    self._last_components_hash = None
+                return True
+        return False
+
+    def get_component_stats(self) -> Dict[str, Any]:
+        """Get statistics about current components"""
+        if not self.components:
+            return {"total_components": 0, "total_tokens": 0}
+
+        total_tokens = sum(c.token_count for c in self.components)
+        priority_distribution = {}
+
+        for comp in self.components:
+            priority_distribution[comp.priority] = priority_distribution.get(comp.priority, 0) + 1
+
+        return {
+            "total_components": len(self.components),
+            "total_tokens": total_tokens,
+            "budget_utilization": total_tokens / self.max_tokens if self.max_tokens > 0 else 0,
+            "priority_distribution": priority_distribution,
+            "cache_enabled": self.enable_caching,
+            "cache_size": len(self._component_cache) if self.enable_caching else 0
+        }
+
+    def clear_cache(self) -> None:
+        """Clear all caches"""
+        if self.enable_caching:
+            self._component_cache.clear()
+            self._assembly_cache = None
+            self._last_components_hash = None
 
     def _truncate_components(
         self,
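Taken together, the new constructor flags (enable_caching, cache_size) and the helper methods added above (adjust_priority, get_component_stats, clear_cache) suggest a usage pattern like the sketch below. This is only an illustration inferred from the signatures visible in this diff, not documentation shipped with the package; the keyword names passed to add_component are assumptions (the hunk above shows only part of that method), and the import path is inferred from the file path.

# Illustrative sketch based on this diff; add_component() keyword names are assumed.
from loom.core.context_assembly import ContextAssembler

assembler = ContextAssembler(
    max_tokens=16000,
    enable_caching=True,   # new in 0.0.3: reuse the assembled string while components are unchanged
    cache_size=100,        # new in 0.0.3: cap on the per-component cache
)

assembler.add_component("system_prompt", "You are a helpful agent.", priority=100, truncatable=False)
assembler.add_component("history", "User: hi\nAssistant: hello", priority=50, truncatable=True)

first = assembler.assemble()    # assembled and cached
second = assembler.assemble()   # same component hash, served from the assembly cache

assembler.adjust_priority("history", 80)   # clears the cached assembly
print(assembler.get_component_stats())     # totals, budget_utilization, cache info
assembler.clear_cache()

The cache key comes from _get_components_hash(), so any change to a component's name, priority, token count, or truncatability invalidates the cached assembly on the next call.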
loom/core/events.py
CHANGED
@@ -4,6 +4,12 @@ Agent Event System for Loom 2.0
 This module defines the unified event model for streaming agent execution.
 Inspired by Claude Code's event-driven architecture.
 
+New features (Loom 0.0.3):
+- Event filtering and batch processing
+- Smart event aggregation
+- Performance-optimized event streams
+- Event priority management
+
 Example:
     ```python
     agent = Agent(llm=llm, tools=tools)
@@ -412,3 +418,243 @@ class EventCollector:
         if finish_events:
             return finish_events[-1].content
         return None
+
+
+class EventFilter:
+    """
+    Event filter - provides advanced event filtering and batching capabilities
+
+    New features (Loom 0.0.3):
+    - Smart event filtering
+    - Batch event processing
+    - Event aggregation and merging
+    - Performance-optimized event streams
+    """
+
+    def __init__(self,
+                 allowed_types: Optional[List[AgentEventType]] = None,
+                 blocked_types: Optional[List[AgentEventType]] = None,
+                 enable_batching: bool = True,
+                 batch_size: int = 10,
+                 batch_timeout: float = 0.1):
+        """
+        Initialize the event filter
+
+        Args:
+            allowed_types: Event types to allow (None = allow all)
+            blocked_types: Event types to block
+            enable_batching: Enable batch processing
+            batch_size: Batch size
+            batch_timeout: Batch timeout (seconds)
+        """
+        self.allowed_types = allowed_types
+        self.blocked_types = blocked_types or []
+        self.enable_batching = enable_batching
+        self.batch_size = batch_size
+        self.batch_timeout = batch_timeout
+
+        # Batching state
+        self._batch_buffer: List[AgentEvent] = []
+        self._last_batch_time = time.time()
+
+    def should_include(self, event: AgentEvent) -> bool:
+        """Decide whether an event should be included"""
+        # Check allowed types
+        if self.allowed_types and event.type not in self.allowed_types:
+            return False
+
+        # Check blocked types
+        if event.type in self.blocked_types:
+            return False
+
+        return True
+
+    def process_event(self, event: AgentEvent) -> List[AgentEvent]:
+        """
+        Process a single event; may return a batch of events
+
+        Returns:
+            List of processed events
+        """
+        if not self.should_include(event):
+            return []
+
+        if not self.enable_batching:
+            return [event]
+
+        # Add to the batch buffer
+        self._batch_buffer.append(event)
+
+        # Check whether the batch should be flushed
+        should_flush = (
+            len(self._batch_buffer) >= self.batch_size or
+            (time.time() - self._last_batch_time) >= self.batch_timeout
+        )
+
+        if should_flush:
+            return self._flush_batch()
+
+        return []
+
+    def _flush_batch(self) -> List[AgentEvent]:
+        """Emit the batched events and clear the buffer"""
+        if not self._batch_buffer:
+            return []
+
+        # Aggregate events of the same type
+        aggregated_events = self._aggregate_events(self._batch_buffer)
+
+        # Clear the buffer
+        self._batch_buffer.clear()
+        self._last_batch_time = time.time()
+
+        return aggregated_events
+
+    def _aggregate_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
+        """Aggregate events intelligently"""
+        if not events:
+            return []
+
+        # Group by event type
+        events_by_type: Dict[AgentEventType, List[AgentEvent]] = {}
+        for event in events:
+            if event.type not in events_by_type:
+                events_by_type[event.type] = []
+            events_by_type[event.type].append(event)
+
+        aggregated = []
+
+        for event_type, type_events in events_by_type.items():
+            if event_type == AgentEventType.LLM_DELTA:
+                # Merge LLM delta events
+                merged_content = "".join(e.content or "" for e in type_events)
+                if merged_content:
+                    # Create the merged event
+                    merged_event = AgentEvent(
+                        type=AgentEventType.LLM_DELTA,
+                        content=merged_content,
+                        timestamp=type_events[0].timestamp,
+                        metadata={
+                            "batch_size": len(type_events),
+                            "aggregated": True
+                        }
+                    )
+                    aggregated.append(merged_event)
+            else:
+                # Other event types are kept as-is
+                aggregated.extend(type_events)
+
+        return aggregated
+
+    def flush_remaining(self) -> List[AgentEvent]:
+        """Force-flush any remaining events"""
+        return self._flush_batch()
+
+
+class EventProcessor:
+    """
+    Event processor - provides advanced event processing capabilities
+
+    New features (Loom 0.0.3):
+    - Event priority management
+    - Smart event routing
+    - Event statistics and analysis
+    - Performance monitoring
+    """
+
+    def __init__(self,
+                 filters: Optional[List[EventFilter]] = None,
+                 enable_stats: bool = True):
+        """
+        Initialize the event processor
+
+        Args:
+            filters: List of event filters
+            enable_stats: Enable statistics collection
+        """
+        self.filters = filters or []
+        self.enable_stats = enable_stats
+
+        # Statistics
+        self._stats = {
+            "total_events": 0,
+            "filtered_events": 0,
+            "batched_events": 0,
+            "events_by_type": {},
+            "processing_times": []
+        }
+
+    def process_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
+        """
+        Process a batch of events
+
+        Args:
+            events: Input events
+
+        Returns:
+            List of processed events
+        """
+        if not events:
+            return []
+
+        start_time = time.time()
+        processed_events = []
+
+        for event in events:
+            # Update statistics
+            if self.enable_stats:
+                self._stats["total_events"] += 1
+                event_type = event.type.value
+                self._stats["events_by_type"][event_type] = \
+                    self._stats["events_by_type"].get(event_type, 0) + 1
+
+            # Apply the filters
+            for filter_obj in self.filters:
+                filtered = filter_obj.process_event(event)
+                processed_events.extend(filtered)
+
+            # With no filters, pass the event through unchanged
+            if not self.filters:
+                processed_events.append(event)
+
+        # Force-flush every filter's batch buffer
+        for filter_obj in self.filters:
+            remaining = filter_obj.flush_remaining()
+            processed_events.extend(remaining)
+
+        # Update processing-time statistics
+        if self.enable_stats:
+            processing_time = time.time() - start_time
+            self._stats["processing_times"].append(processing_time)
+            self._stats["filtered_events"] = len(processed_events)
+
+        return processed_events
+
+    def get_stats(self) -> Dict[str, Any]:
+        """Get processing statistics"""
+        if not self.enable_stats:
+            return {}
+
+        avg_processing_time = (
+            sum(self._stats["processing_times"]) / len(self._stats["processing_times"])
+            if self._stats["processing_times"] else 0
+        )
+
+        return {
+            **self._stats,
+            "average_processing_time": avg_processing_time,
+            "filter_efficiency": (
+                self._stats["filtered_events"] / self._stats["total_events"]
+                if self._stats["total_events"] > 0 else 0
+            )
+        }
+
+    def reset_stats(self) -> None:
+        """Reset statistics"""
+        self._stats = {
+            "total_events": 0,
+            "filtered_events": 0,
+            "batched_events": 0,
+            "events_by_type": {},
+            "processing_times": []
+        }
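EventFilter and EventProcessor compose into a small pipeline: filters decide which events pass, buffer them, and merge consecutive LLM_DELTA chunks, while the processor applies the filters, flushes their buffers, and records statistics. The sketch below is an illustration built only from the code in this hunk; the import path, the AgentEvent keyword arguments (mirroring the AgentEvent(...) call above), and the float timestamp are assumptions.

# Illustrative sketch; AgentEvent kwargs, the float timestamp, and the import path are assumptions.
import time
from loom.core.events import AgentEvent, AgentEventType, EventFilter, EventProcessor

# Pass only LLM_DELTA events and merge each flushed batch into one larger chunk.
delta_filter = EventFilter(
    allowed_types=[AgentEventType.LLM_DELTA],
    enable_batching=True,
    batch_size=10,
    batch_timeout=0.1,
)
processor = EventProcessor(filters=[delta_filter], enable_stats=True)

deltas = [
    AgentEvent(type=AgentEventType.LLM_DELTA, content=chunk,
               timestamp=time.time(), metadata={})
    for chunk in ["Hel", "lo ", "world"]
]

merged = processor.process_events(deltas)
# Depending on the batch timeout, the deltas come back as one or two aggregated
# LLM_DELTA events; merged metadata carries "batch_size" and "aggregated".
print("".join(e.content or "" for e in merged))   # "Hello world"
print(processor.get_stats())   # events_by_type, average_processing_time, filter_efficiency, ...

Because a flush can also be triggered by batch_timeout, the number of aggregated events depends on timing; only the concatenated content is deterministic.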