loom-agent 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of loom-agent might be problematic.
- loom/builtin/tools/calculator.py +4 -0
- loom/builtin/tools/document_search.py +5 -0
- loom/builtin/tools/glob.py +4 -0
- loom/builtin/tools/grep.py +4 -0
- loom/builtin/tools/http_request.py +5 -0
- loom/builtin/tools/python_repl.py +5 -0
- loom/builtin/tools/read_file.py +4 -0
- loom/builtin/tools/task.py +105 -0
- loom/builtin/tools/web_search.py +4 -0
- loom/builtin/tools/write_file.py +4 -0
- loom/components/agent.py +121 -5
- loom/core/agent_executor.py +777 -321
- loom/core/compression_manager.py +17 -10
- loom/core/context_assembly.py +437 -0
- loom/core/events.py +660 -0
- loom/core/execution_context.py +119 -0
- loom/core/tool_orchestrator.py +383 -0
- loom/core/turn_state.py +188 -0
- loom/core/types.py +15 -4
- loom/core/unified_coordination.py +389 -0
- loom/interfaces/event_producer.py +172 -0
- loom/interfaces/tool.py +22 -1
- loom/security/__init__.py +13 -0
- loom/security/models.py +85 -0
- loom/security/path_validator.py +128 -0
- loom/security/validator.py +346 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.1_agent_events.md +121 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.2_streaming_api.md +521 -0
- loom/tasks/PHASE_1_FOUNDATION/task_1.3_context_assembler.md +606 -0
- loom/tasks/PHASE_2_CORE_FEATURES/task_2.1_tool_orchestrator.md +743 -0
- loom/tasks/PHASE_2_CORE_FEATURES/task_2.2_security_validator.md +676 -0
- loom/tasks/README.md +109 -0
- loom/tasks/__init__.py +11 -0
- loom/tasks/sql_placeholder.py +100 -0
- loom_agent-0.0.3.dist-info/METADATA +292 -0
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/RECORD +38 -19
- loom_agent-0.0.1.dist-info/METADATA +0 -457
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/WHEEL +0 -0
- {loom_agent-0.0.1.dist-info → loom_agent-0.0.3.dist-info}/licenses/LICENSE +0 -0
loom/core/events.py
ADDED
@@ -0,0 +1,660 @@
"""
Agent Event System for Loom 2.0

This module defines the unified event model for streaming agent execution.
Inspired by Claude Code's event-driven architecture.

New features (Loom 0.0.3):
- Event filtering and batch processing
- Intelligent event aggregation
- Performance-optimized event streams
- Event priority management

Example:
    ```python
    agent = Agent(llm=llm, tools=tools)

    async for event in agent.execute("Search for TODO comments"):
        if event.type == AgentEventType.LLM_DELTA:
            print(event.content, end="", flush=True)
        elif event.type == AgentEventType.TOOL_PROGRESS:
            print(f"\\nTool: {event.metadata['tool_name']}")
        elif event.type == AgentEventType.AGENT_FINISH:
            print(f"\\n✓ {event.content}")
    ```
"""

from dataclasses import dataclass, field
from enum import Enum
from typing import Optional, Dict, Any, List
import time
import uuid


class AgentEventType(Enum):
    """
    Agent event types for different execution phases.

    Event Categories:
    - Phase Events: Lifecycle events for execution phases
    - Context Events: Context assembly and management
    - RAG Events: Retrieval-augmented generation events
    - LLM Events: Language model interaction events
    - Tool Events: Tool execution and progress
    - Agent Events: High-level agent state changes
    - Error Events: Error handling and recovery
    """

    # ===== Phase Events =====
    PHASE_START = "phase_start"
    """A new execution phase has started"""

    PHASE_END = "phase_end"
    """An execution phase has completed"""

    # ===== Context Events =====
    CONTEXT_ASSEMBLY_START = "context_assembly_start"
    """Starting to assemble system context"""

    CONTEXT_ASSEMBLY_COMPLETE = "context_assembly_complete"
    """System context assembly completed"""

    COMPRESSION_APPLIED = "compression_applied"
    """Conversation history was compressed"""

    # ===== RAG Events =====
    RETRIEVAL_START = "retrieval_start"
    """Starting document retrieval"""

    RETRIEVAL_PROGRESS = "retrieval_progress"
    """Progress update during retrieval (documents found)"""

    RETRIEVAL_COMPLETE = "retrieval_complete"
    """Document retrieval completed"""

    # ===== LLM Events =====
    LLM_START = "llm_start"
    """LLM call initiated"""

    LLM_DELTA = "llm_delta"
    """Streaming text chunk from LLM"""

    LLM_COMPLETE = "llm_complete"
    """LLM call completed"""

    LLM_TOOL_CALLS = "llm_tool_calls"
    """LLM requested tool calls"""

    # ===== Tool Events =====
    TOOL_CALLS_START = "tool_calls_start"
    """Starting to execute tool calls"""

    TOOL_EXECUTION_START = "tool_execution_start"
    """Individual tool execution started"""

    TOOL_PROGRESS = "tool_progress"
    """Progress update from tool execution"""

    TOOL_RESULT = "tool_result"
    """Tool execution completed with result"""

    TOOL_ERROR = "tool_error"
    """Tool execution failed"""

    TOOL_CALLS_COMPLETE = "tool_calls_complete"
    """All tool calls completed (batch execution finished)"""

    # ===== Agent Events =====
    ITERATION_START = "iteration_start"
    """New agent iteration started (for recursive loops)"""

    ITERATION_END = "iteration_end"
    """Agent iteration completed"""

    RECURSION = "recursion"
    """Recursive call initiated (tt mode)"""

    AGENT_FINISH = "agent_finish"
    """Agent execution finished successfully"""

    MAX_ITERATIONS_REACHED = "max_iterations_reached"
    """Maximum iteration limit reached"""

    EXECUTION_CANCELLED = "execution_cancelled"
    """Execution was cancelled via cancel_token"""

    # ===== Error Events =====
    ERROR = "error"
    """Error occurred during execution"""

    RECOVERY_ATTEMPT = "recovery_attempt"
    """Attempting to recover from error"""

    RECOVERY_SUCCESS = "recovery_success"
    """Error recovery succeeded"""

    RECOVERY_FAILED = "recovery_failed"
    """Error recovery failed"""


@dataclass
class ToolCall:
    """Represents a tool invocation request from the LLM"""

    id: str
    """Unique identifier for this tool call"""

    name: str
    """Name of the tool to execute"""

    arguments: Dict[str, Any]
    """Arguments to pass to the tool"""

    def __post_init__(self):
        if not self.id:
            self.id = f"call_{uuid.uuid4().hex[:8]}"


@dataclass
class ToolResult:
    """Represents the result of a tool execution"""

    tool_call_id: str
    """ID of the tool call this result corresponds to"""

    tool_name: str
    """Name of the tool that was executed"""

    content: str
    """Result content (or error message)"""

    is_error: bool = False
    """Whether this result represents an error"""

    execution_time_ms: Optional[float] = None
    """Time taken to execute the tool in milliseconds"""

    metadata: Dict[str, Any] = field(default_factory=dict)
    """Additional metadata about the execution"""


@dataclass
class AgentEvent:
    """
    Unified event model for agent execution streaming.

    All components in Loom 2.0 produce AgentEvent instances to communicate
    their state and progress. This enables:
    - Real-time progress updates to users
    - Fine-grained control over execution flow
    - Debugging and observability
    - Flexible consumption patterns

    Attributes:
        type: The type of event (see AgentEventType)
        timestamp: Unix timestamp when event was created
        phase: Optional execution phase name (e.g., "context", "retrieval", "llm")
        content: Optional text content (for LLM deltas, final responses)
        tool_call: Optional tool call request
        tool_result: Optional tool execution result
        error: Optional exception that occurred
        metadata: Additional event-specific data
        iteration: Current iteration number (for recursive loops)
        turn_id: Unique ID for this conversation turn
    """

    type: AgentEventType
    """The type of this event"""

    timestamp: float = field(default_factory=time.time)
    """Unix timestamp when this event was created"""

    # ===== Optional Fields (based on event type) =====

    phase: Optional[str] = None
    """Execution phase name (e.g., 'context_assembly', 'tool_execution')"""

    content: Optional[str] = None
    """Text content (for LLM_DELTA, AGENT_FINISH, etc.)"""

    tool_call: Optional[ToolCall] = None
    """Tool call request (for LLM_TOOL_CALLS, TOOL_EXECUTION_START)"""

    tool_result: Optional[ToolResult] = None
    """Tool execution result (for TOOL_RESULT, TOOL_ERROR)"""

    error: Optional[Exception] = None
    """Exception that occurred (for ERROR events)"""

    metadata: Dict[str, Any] = field(default_factory=dict)
    """Additional event-specific data"""

    # ===== Tracking Fields =====

    iteration: Optional[int] = None
    """Current iteration number (for recursive agent loops)"""

    turn_id: Optional[str] = None
    """Unique identifier for this conversation turn"""

    def __post_init__(self):
        """Generate turn_id if not provided"""
        if self.turn_id is None:
            self.turn_id = f"turn_{uuid.uuid4().hex[:12]}"

    # ===== Convenience Constructors =====

    @classmethod
    def phase_start(cls, phase: str, **metadata) -> "AgentEvent":
        """Create a PHASE_START event"""
        return cls(
            type=AgentEventType.PHASE_START,
            phase=phase,
            metadata=metadata
        )

    @classmethod
    def phase_end(cls, phase: str, **metadata) -> "AgentEvent":
        """Create a PHASE_END event"""
        return cls(
            type=AgentEventType.PHASE_END,
            phase=phase,
            metadata=metadata
        )

    @classmethod
    def llm_delta(cls, content: str, **metadata) -> "AgentEvent":
        """Create an LLM_DELTA event for streaming text"""
        return cls(
            type=AgentEventType.LLM_DELTA,
            content=content,
            metadata=metadata
        )

    @classmethod
    def tool_progress(
        cls,
        tool_name: str,
        status: str,
        **metadata
    ) -> "AgentEvent":
        """Create a TOOL_PROGRESS event"""
        return cls(
            type=AgentEventType.TOOL_PROGRESS,
            metadata={"tool_name": tool_name, "status": status, **metadata}
        )

    @classmethod
    def tool_result(
        cls,
        tool_result: ToolResult,
        **metadata
    ) -> "AgentEvent":
        """Create a TOOL_RESULT event"""
        return cls(
            type=AgentEventType.TOOL_RESULT,
            tool_result=tool_result,
            metadata=metadata
        )

    @classmethod
    def agent_finish(cls, content: str, **metadata) -> "AgentEvent":
        """Create an AGENT_FINISH event"""
        return cls(
            type=AgentEventType.AGENT_FINISH,
            content=content,
            metadata=metadata
        )

    @classmethod
    def error(cls, error: Exception, **metadata) -> "AgentEvent":
        """Create an ERROR event"""
        return cls(
            type=AgentEventType.ERROR,
            error=error,
            metadata=metadata
        )

    # ===== Utility Methods =====

    def is_terminal(self) -> bool:
        """Check if this event signals execution completion"""
        return self.type in {
            AgentEventType.AGENT_FINISH,
            AgentEventType.MAX_ITERATIONS_REACHED,
            AgentEventType.ERROR
        }

    def is_llm_content(self) -> bool:
        """Check if this event contains LLM-generated content"""
        return self.type in {
            AgentEventType.LLM_DELTA,
            AgentEventType.LLM_COMPLETE,
            AgentEventType.AGENT_FINISH
        }

    def is_tool_event(self) -> bool:
        """Check if this is a tool-related event"""
        return self.type.value.startswith("tool_")

    def __repr__(self) -> str:
        """Human-readable representation"""
        parts = [f"AgentEvent({self.type.value}"]

        if self.phase:
            parts.append(f"phase={self.phase}")

        if self.content:
            preview = self.content[:50] + "..." if len(self.content) > 50 else self.content
            parts.append(f"content='{preview}'")

        if self.tool_call:
            parts.append(f"tool={self.tool_call.name}")

        # Access instance variable directly to avoid class method with same name
        tool_result_instance = self.__dict__.get('tool_result')
        if tool_result_instance and isinstance(tool_result_instance, ToolResult):
            parts.append(f"tool={tool_result_instance.tool_name}")

        if self.error:
            parts.append(f"error={type(self.error).__name__}")

        if self.iteration is not None:
            parts.append(f"iter={self.iteration}")

        return ", ".join(parts) + ")"


# ===== Event Consumer Helpers =====

class EventCollector:
    """
    Helper class to collect and filter events during agent execution.

    Example:
        ```python
        collector = EventCollector()

        async for event in agent.execute(prompt):
            collector.add(event)

        # Get all LLM content
        llm_text = collector.get_llm_content()

        # Get all tool results
        tool_results = collector.get_tool_results()
        ```
    """

    def __init__(self):
        self.events: List[AgentEvent] = []

    def add(self, event: AgentEvent):
        """Add an event to the collection"""
        self.events.append(event)

    def filter(self, event_type: AgentEventType) -> List[AgentEvent]:
        """Get all events of a specific type"""
        return [e for e in self.events if e.type == event_type]

    def get_llm_content(self) -> str:
        """Reconstruct full LLM output from LLM_DELTA events"""
        deltas = self.filter(AgentEventType.LLM_DELTA)
        return "".join(e.content or "" for e in deltas)

    def get_tool_results(self) -> List[ToolResult]:
        """Get all tool results"""
        result_events = self.filter(AgentEventType.TOOL_RESULT)
        return [e.tool_result for e in result_events if e.tool_result]

    def get_errors(self) -> List[Exception]:
        """Get all errors that occurred"""
        error_events = self.filter(AgentEventType.ERROR)
        return [e.error for e in error_events if e.error]

    def get_final_response(self) -> Optional[str]:
        """Get the final agent response"""
        finish_events = self.filter(AgentEventType.AGENT_FINISH)
        if finish_events:
            return finish_events[-1].content
        return None


class EventFilter:
    """
    Event filter - provides advanced event filtering and batch processing.

    New features (Loom 0.0.3):
    - Intelligent event filtering
    - Batch event processing
    - Event aggregation and merging
    - Performance-optimized event streams
    """

    def __init__(self,
                 allowed_types: Optional[List[AgentEventType]] = None,
                 blocked_types: Optional[List[AgentEventType]] = None,
                 enable_batching: bool = True,
                 batch_size: int = 10,
                 batch_timeout: float = 0.1):
        """
        Initialize the event filter.

        Args:
            allowed_types: Event types to allow (None = allow all)
            blocked_types: Event types to block
            enable_batching: Enable batch processing
            batch_size: Batch size
            batch_timeout: Batch timeout in seconds
        """
        self.allowed_types = allowed_types
        self.blocked_types = blocked_types or []
        self.enable_batching = enable_batching
        self.batch_size = batch_size
        self.batch_timeout = batch_timeout

        # Batching state
        self._batch_buffer: List[AgentEvent] = []
        self._last_batch_time = time.time()

    def should_include(self, event: AgentEvent) -> bool:
        """Decide whether an event should be included"""
        # Check the allowed types
        if self.allowed_types and event.type not in self.allowed_types:
            return False

        # Check the blocked types
        if event.type in self.blocked_types:
            return False

        return True

    def process_event(self, event: AgentEvent) -> List[AgentEvent]:
        """
        Process a single event; may return a batch of events.

        Returns:
            The processed events
        """
        if not self.should_include(event):
            return []

        if not self.enable_batching:
            return [event]

        # Add to the batch buffer
        self._batch_buffer.append(event)

        # Check whether the batch should be flushed
        should_flush = (
            len(self._batch_buffer) >= self.batch_size or
            (time.time() - self._last_batch_time) >= self.batch_timeout
        )

        if should_flush:
            return self._flush_batch()

        return []

    def _flush_batch(self) -> List[AgentEvent]:
        """Emit the batched events and clear the buffer"""
        if not self._batch_buffer:
            return []

        # Aggregate events of the same type
        aggregated_events = self._aggregate_events(self._batch_buffer)

        # Clear the buffer
        self._batch_buffer.clear()
        self._last_batch_time = time.time()

        return aggregated_events

    def _aggregate_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
        """Intelligently aggregate events"""
        if not events:
            return []

        # Group by event type
        events_by_type: Dict[AgentEventType, List[AgentEvent]] = {}
        for event in events:
            if event.type not in events_by_type:
                events_by_type[event.type] = []
            events_by_type[event.type].append(event)

        aggregated = []

        for event_type, type_events in events_by_type.items():
            if event_type == AgentEventType.LLM_DELTA:
                # Merge LLM delta events
                merged_content = "".join(e.content or "" for e in type_events)
                if merged_content:
                    # Create the merged event
                    merged_event = AgentEvent(
                        type=AgentEventType.LLM_DELTA,
                        content=merged_content,
                        timestamp=type_events[0].timestamp,
                        metadata={
                            "batch_size": len(type_events),
                            "aggregated": True
                        }
                    )
                    aggregated.append(merged_event)
            else:
                # Events of other types pass through unchanged
                aggregated.extend(type_events)

        return aggregated

    def flush_remaining(self) -> List[AgentEvent]:
        """Force-flush any remaining buffered events"""
        return self._flush_batch()


class EventProcessor:
    """
    Event processor - provides advanced event processing capabilities.

    New features (Loom 0.0.3):
    - Event priority management
    - Intelligent event routing
    - Event statistics and analysis
    - Performance monitoring
    """

    def __init__(self,
                 filters: Optional[List[EventFilter]] = None,
                 enable_stats: bool = True):
        """
        Initialize the event processor.

        Args:
            filters: List of event filters
            enable_stats: Enable statistics collection
        """
        self.filters = filters or []
        self.enable_stats = enable_stats

        # Statistics
        self._stats = {
            "total_events": 0,
            "filtered_events": 0,
            "batched_events": 0,
            "events_by_type": {},
            "processing_times": []
        }

    def process_events(self, events: List[AgentEvent]) -> List[AgentEvent]:
        """
        Process events in batch.

        Args:
            events: Input events

        Returns:
            The processed events
        """
        if not events:
            return []

        start_time = time.time()
        processed_events = []

        for event in events:
            # Update statistics
            if self.enable_stats:
                self._stats["total_events"] += 1
                event_type = event.type.value
                self._stats["events_by_type"][event_type] = \
                    self._stats["events_by_type"].get(event_type, 0) + 1

            # Apply the filters
            for filter_obj in self.filters:
                filtered = filter_obj.process_event(event)
                processed_events.extend(filtered)

            # If there are no filters, add the event directly
            if not self.filters:
                processed_events.append(event)

        # Force-flush the batch buffer of every filter
        for filter_obj in self.filters:
            remaining = filter_obj.flush_remaining()
            processed_events.extend(remaining)

        # Update processing-time statistics
        if self.enable_stats:
            processing_time = time.time() - start_time
            self._stats["processing_times"].append(processing_time)
            self._stats["filtered_events"] = len(processed_events)

        return processed_events

    def get_stats(self) -> Dict[str, Any]:
        """Return processing statistics"""
        if not self.enable_stats:
            return {}

        avg_processing_time = (
            sum(self._stats["processing_times"]) / len(self._stats["processing_times"])
            if self._stats["processing_times"] else 0
        )

        return {
            **self._stats,
            "average_processing_time": avg_processing_time,
            "filter_efficiency": (
                self._stats["filtered_events"] / self._stats["total_events"]
                if self._stats["total_events"] > 0 else 0
            )
        }

    def reset_stats(self) -> None:
        """Reset statistics"""
        self._stats = {
            "total_events": 0,
            "filtered_events": 0,
            "batched_events": 0,
            "events_by_type": {},
            "processing_times": []
        }