agentfield 0.1.22rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42) hide show
  1. agentfield/__init__.py +66 -0
  2. agentfield/agent.py +3569 -0
  3. agentfield/agent_ai.py +1125 -0
  4. agentfield/agent_cli.py +386 -0
  5. agentfield/agent_field_handler.py +494 -0
  6. agentfield/agent_mcp.py +534 -0
  7. agentfield/agent_registry.py +29 -0
  8. agentfield/agent_server.py +1185 -0
  9. agentfield/agent_utils.py +269 -0
  10. agentfield/agent_workflow.py +323 -0
  11. agentfield/async_config.py +278 -0
  12. agentfield/async_execution_manager.py +1227 -0
  13. agentfield/client.py +1447 -0
  14. agentfield/connection_manager.py +280 -0
  15. agentfield/decorators.py +527 -0
  16. agentfield/did_manager.py +337 -0
  17. agentfield/dynamic_skills.py +304 -0
  18. agentfield/execution_context.py +255 -0
  19. agentfield/execution_state.py +453 -0
  20. agentfield/http_connection_manager.py +429 -0
  21. agentfield/litellm_adapters.py +140 -0
  22. agentfield/logger.py +249 -0
  23. agentfield/mcp_client.py +204 -0
  24. agentfield/mcp_manager.py +340 -0
  25. agentfield/mcp_stdio_bridge.py +550 -0
  26. agentfield/memory.py +723 -0
  27. agentfield/memory_events.py +489 -0
  28. agentfield/multimodal.py +173 -0
  29. agentfield/multimodal_response.py +403 -0
  30. agentfield/pydantic_utils.py +227 -0
  31. agentfield/rate_limiter.py +280 -0
  32. agentfield/result_cache.py +441 -0
  33. agentfield/router.py +190 -0
  34. agentfield/status.py +70 -0
  35. agentfield/types.py +710 -0
  36. agentfield/utils.py +26 -0
  37. agentfield/vc_generator.py +464 -0
  38. agentfield/vision.py +198 -0
  39. agentfield-0.1.22rc2.dist-info/METADATA +102 -0
  40. agentfield-0.1.22rc2.dist-info/RECORD +42 -0
  41. agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
  42. agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
@@ -0,0 +1,255 @@
1
+ """
2
+ Minimal execution context helpers for the simplified run-based pipeline.
3
+ """
4
+
5
+ import contextvars
6
+ import time
7
+ import uuid
8
+ from dataclasses import dataclass
9
+ from typing import Any, Dict, Optional
10
+
11
+
12
# Canonical HTTP header names used to propagate execution metadata
# between agent nodes and the AgentField backend.
_RUN_HEADER = "X-Run-ID"
_EXECUTION_HEADER = "X-Execution-ID"
_PARENT_EXECUTION_HEADER = "X-Parent-Execution-ID"
_SESSION_HEADER = "X-Session-ID"
_ACTOR_HEADER = "X-Actor-ID"
# Decentralized identifier (DID) headers for the calling and target parties.
_CALLER_DID_HEADER = "X-Caller-DID"
_TARGET_DID_HEADER = "X-Target-DID"
_AGENT_DID_HEADER = "X-Agent-Node-DID"
20
+
21
+
22
@dataclass
class ExecutionContext:
    """Captures the inbound execution metadata for a reasoner invocation."""

    # Core identification
    run_id: str
    execution_id: str
    agent_instance: Any
    reasoner_name: str
    agent_node_id: Optional[str] = None
    parent_execution_id: Optional[str] = None
    depth: int = 0
    started_at: float = 0.0
    session_id: Optional[str] = None
    actor_id: Optional[str] = None
    caller_did: Optional[str] = None
    target_did: Optional[str] = None
    agent_node_did: Optional[str] = None
    # Compatibility fields retained for existing integrations
    workflow_id: Optional[str] = None
    parent_workflow_id: Optional[str] = None
    root_workflow_id: Optional[str] = None
    registered: bool = False

    def __post_init__(self) -> None:
        # Default the start timestamp to "now" and mirror run_id into the
        # legacy workflow_id field when the caller did not supply them.
        if not self.started_at:
            self.started_at = time.time()
        if not self.workflow_id:
            self.workflow_id = self.run_id

    # ------------------------------------------------------------------
    # Header helpers

    def to_headers(self) -> Dict[str, str]:
        """
        Produce the headers that should be forwarded for downstream executions.

        We only send the run identifier and the current execution as the parent.
        The AgentField backend issues fresh execution IDs for child nodes.
        """

        parent_execution = self.parent_execution_id or self.execution_id

        headers: Dict[str, str] = {
            _RUN_HEADER: self.run_id,
            "X-Workflow-ID": self.workflow_id or self.run_id,
            _PARENT_EXECUTION_HEADER: parent_execution,
            _EXECUTION_HEADER: self.execution_id,
            "X-Workflow-Run-ID": self.run_id,
        }

        # Prefer the explicit agent_node_id field, falling back to the agent
        # instance's node_id. (Previously this header was assigned twice with
        # the same net result; the single assignment below is equivalent.)
        agent_node_id = self.agent_node_id or getattr(
            self.agent_instance, "node_id", None
        )
        if agent_node_id:
            headers["X-Agent-Node-ID"] = agent_node_id

        if self.session_id:
            headers[_SESSION_HEADER] = self.session_id
        if self.actor_id:
            headers[_ACTOR_HEADER] = self.actor_id
        if self.parent_workflow_id:
            headers["X-Parent-Workflow-ID"] = self.parent_workflow_id
        if self.root_workflow_id:
            headers["X-Root-Workflow-ID"] = self.root_workflow_id
        if self.caller_did:
            headers[_CALLER_DID_HEADER] = self.caller_did
        if self.target_did:
            headers[_TARGET_DID_HEADER] = self.target_did
        if self.agent_node_did:
            headers[_AGENT_DID_HEADER] = self.agent_node_did

        return headers

    def child_context(self) -> "ExecutionContext":
        """
        Create an in-process child context for local tracking.

        The new execution ID is generated locally so callers can reference
        it while awaiting downstream responses. The AgentField server will still
        assign its own execution ID when the child request is submitted.
        """

        return ExecutionContext(
            run_id=self.run_id,
            execution_id=generate_execution_id(),
            agent_instance=self.agent_instance,
            agent_node_id=self.agent_node_id,
            reasoner_name=self.reasoner_name,
            parent_execution_id=self.execution_id,
            depth=self.depth + 1,
            session_id=self.session_id,
            actor_id=self.actor_id,
            caller_did=self.caller_did,
            target_did=self.target_did,
            agent_node_did=self.agent_node_did,
            workflow_id=self.workflow_id,
            parent_workflow_id=self.workflow_id,
            root_workflow_id=self.root_workflow_id or self.workflow_id,
        )

    def create_child_context(self) -> "ExecutionContext":
        """Backwards-compatible wrapper returning a derived child context."""

        return self.child_context()

    # ------------------------------------------------------------------
    # Factories

    @classmethod
    def from_request(cls, request, agent_node_id: str) -> "ExecutionContext":
        """
        Build an execution context from inbound FastAPI request headers.

        We accept both canonical and lowercase header names to match Starlette's
        header behavior.
        """

        headers = request.headers

        def _read(name: str) -> Optional[str]:
            # Starlette normalizes header names to lowercase; try that form
            # first, then the canonical form for plain-dict mappings.
            lower = name.lower()
            return headers.get(lower) or headers.get(name)

        workflow_id = _read("X-Workflow-ID")
        run_id = _read(_RUN_HEADER) or workflow_id or generate_run_id()
        if not workflow_id:
            workflow_id = run_id
        execution_id = _read(_EXECUTION_HEADER) or generate_execution_id()
        parent_execution_id = _read(_PARENT_EXECUTION_HEADER)
        session_id = _read(_SESSION_HEADER)
        actor_id = _read(_ACTOR_HEADER)
        caller_did = _read(_CALLER_DID_HEADER)
        target_did = _read(_TARGET_DID_HEADER)
        agent_node_did = _read(_AGENT_DID_HEADER)
        parent_workflow_id = _read("X-Parent-Workflow-ID")
        root_workflow_id = _read("X-Root-Workflow-ID")

        # Imported lazily — presumably to avoid an import cycle at module
        # load time (TODO confirm).
        from .agent_registry import get_current_agent_instance

        return cls(
            run_id=run_id,
            execution_id=execution_id,
            agent_instance=get_current_agent_instance(),
            agent_node_id=agent_node_id,
            reasoner_name="unknown",
            parent_execution_id=parent_execution_id,
            session_id=session_id,
            actor_id=actor_id,
            caller_did=caller_did,
            target_did=target_did,
            agent_node_did=agent_node_did,
            workflow_id=workflow_id,
            parent_workflow_id=parent_workflow_id,
            root_workflow_id=root_workflow_id,
            registered=True,
        )

    @classmethod
    def new_root(
        cls, agent_node_id: str, reasoner_name: str = "root"
    ) -> "ExecutionContext":
        """Create a brand-new root execution context for manual invocation."""

        # Imported lazily — presumably to avoid an import cycle at module
        # load time (TODO confirm).
        from .agent_registry import get_current_agent_instance

        run_id = generate_run_id()
        return cls(
            run_id=run_id,
            execution_id=generate_execution_id(),
            agent_instance=get_current_agent_instance(),
            agent_node_id=agent_node_id,
            reasoner_name=reasoner_name,
            parent_execution_id=None,
            workflow_id=run_id,
            root_workflow_id=run_id,
        )

    @classmethod
    def create_new(cls, agent_node_id: str, workflow_name: str) -> "ExecutionContext":
        """
        Backwards-compatible wrapper for legacy code that expected create_new().
        Generates a fresh root execution context using the provided workflow name.
        """

        context = cls.new_root(agent_node_id, workflow_name)
        context.reasoner_name = workflow_name
        return context
213
+
214
+
215
class ExecutionContextManager:
    """Async-safe access to the current execution context.

    Backed by a ``contextvars.ContextVar`` so each async task (or thread)
    observes its own current :class:`ExecutionContext`.
    """

    def __init__(self) -> None:
        # default=None means "no context bound yet" for a fresh task.
        self._context_var: contextvars.ContextVar[Optional[ExecutionContext]] = (
            contextvars.ContextVar("execution_context", default=None)
        )

    def get_current_context(self) -> Optional[ExecutionContext]:
        """Return the context bound to the current task, or ``None``."""
        return self._context_var.get()

    def set_context(self, context: ExecutionContext) -> contextvars.Token:
        """Bind *context* and return a token for ``reset_context``."""
        return self._context_var.set(context)

    def reset_context(self, token: contextvars.Token) -> None:
        """Restore the context that was bound before the matching ``set_context``."""
        self._context_var.reset(token)
231
+
232
+
233
# Process-wide singleton backing the module-level helper functions below.
_context_manager = ExecutionContextManager()
234
+
235
+
236
def get_current_context() -> Optional[ExecutionContext]:
    """Return the ExecutionContext bound to the current async task, if any."""
    return _context_manager.get_current_context()
238
+
239
+
240
def set_execution_context(context: ExecutionContext) -> contextvars.Token:
    """Bind *context* as the current execution context.

    Returns the contextvars token required by ``reset_execution_context``.
    """
    return _context_manager.set_context(context)
242
+
243
+
244
def reset_execution_context(token: contextvars.Token) -> None:
    """Restore the context active before the matching ``set_execution_context``."""
    _context_manager.reset_context(token)
246
+
247
+
248
def generate_execution_id() -> str:
    """Return a unique execution ID of the form ``exec_<ms-timestamp>_<8-hex>``."""
    millis = int(time.time() * 1000)
    suffix = uuid.uuid4().hex[:8]
    return "exec_{}_{}".format(millis, suffix)
251
+
252
+
253
def generate_run_id() -> str:
    """Return a unique run ID of the form ``run_<ms-timestamp>_<8-hex>``."""
    millis = int(time.time() * 1000)
    suffix = uuid.uuid4().hex[:8]
    return "run_{}_{}".format(millis, suffix)
@@ -0,0 +1,453 @@
1
+ """
2
+ Execution state management for async executions.
3
+
4
+ This module provides dataclasses and enums for tracking the state of individual
5
+ async executions throughout their lifecycle.
6
+ """
7
+
8
+ from dataclasses import dataclass, field
9
+ from datetime import datetime, timezone
10
+ from enum import Enum
11
+ from typing import Any, Dict, Optional, List
12
+ import time
13
+
14
+
15
class ExecutionStatus(Enum):
    """Enumeration of possible execution statuses.

    PENDING/QUEUED/RUNNING are treated as active states and
    SUCCEEDED/FAILED/CANCELLED/TIMEOUT as terminal states by
    ``ExecutionState.is_active`` / ``ExecutionState.is_terminal``;
    UNKNOWN belongs to neither group.
    """

    PENDING = "pending"
    QUEUED = "queued"
    RUNNING = "running"
    SUCCEEDED = "succeeded"
    FAILED = "failed"
    CANCELLED = "cancelled"
    TIMEOUT = "timeout"
    UNKNOWN = "unknown"
26
+
27
+
28
class ExecutionPriority(Enum):
    """Enumeration of execution priorities for queue management.

    Members are named from least to most urgent; the actual scheduling
    semantics are defined by the queue implementation that consumes them.
    """

    LOW = "low"
    NORMAL = "normal"
    HIGH = "high"
    URGENT = "urgent"
35
+
36
+
37
@dataclass
class ExecutionMetrics:
    """Metrics and performance data for an execution."""

    # Timing metrics
    submit_time: float = field(default_factory=time.time)
    start_time: Optional[float] = None
    end_time: Optional[float] = None

    # Polling metrics
    poll_count: int = 0
    total_poll_time: float = 0.0
    last_poll_time: Optional[float] = None

    # Network metrics
    network_requests: int = 0
    network_errors: int = 0
    retry_count: int = 0

    # Resource metrics
    result_size_bytes: Optional[int] = None
    memory_usage_mb: Optional[float] = None

    @property
    def total_duration(self) -> Optional[float]:
        """Seconds from submission to completion, or None if unfinished."""
        if not self.submit_time or not self.end_time:
            return None
        return self.end_time - self.submit_time

    @property
    def execution_duration(self) -> Optional[float]:
        """Actual execution duration (excluding queue time), or None."""
        if not self.start_time or not self.end_time:
            return None
        return self.end_time - self.start_time

    @property
    def queue_duration(self) -> Optional[float]:
        """Time spent in queue before execution started, or None."""
        if not self.submit_time or not self.start_time:
            return None
        return self.start_time - self.submit_time

    @property
    def average_poll_interval(self) -> Optional[float]:
        """Average time between polls; defined once at least two polls exist."""
        if self.poll_count <= 1 or self.total_poll_time <= 0:
            return None
        return self.total_poll_time / (self.poll_count - 1)

    def add_poll(self, poll_duration: float) -> None:
        """Record one polling round-trip lasting *poll_duration* seconds."""
        self.last_poll_time = time.time()
        self.poll_count += 1
        self.network_requests += 1
        self.total_poll_time += poll_duration

    def add_network_error(self) -> None:
        """Record a network error."""
        self.network_errors += 1

    def add_retry(self) -> None:
        """Record a retry attempt."""
        self.retry_count += 1
102
+
103
+
104
@dataclass
class ExecutionState:
    """
    Complete state information for an async execution.

    This class tracks all aspects of an execution from submission to completion,
    including status, results, errors, metrics, and polling information.
    """

    # Core identification
    execution_id: str
    target: str
    input_data: Dict[str, Any]

    # Status and lifecycle
    status: ExecutionStatus = ExecutionStatus.QUEUED
    priority: ExecutionPriority = ExecutionPriority.NORMAL
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
    updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    # Results and errors
    result: Optional[Any] = None
    error_message: Optional[str] = None
    error_details: Optional[Dict[str, Any]] = None

    # Execution context
    workflow_id: Optional[str] = None
    parent_execution_id: Optional[str] = None
    session_id: Optional[str] = None
    actor_id: Optional[str] = None

    # Webhook metadata
    webhook_registered: bool = False
    webhook_error: Optional[str] = None

    # Configuration
    timeout: Optional[float] = None
    max_retries: int = 3

    # Polling state
    next_poll_time: float = field(default_factory=time.time)
    current_poll_interval: float = 0.05  # Start with 50ms
    consecutive_failures: int = 0

    # Metrics and monitoring
    metrics: ExecutionMetrics = field(default_factory=ExecutionMetrics)

    # Internal state
    _is_cancelled: bool = field(default=False, init=False)
    _cancellation_reason: Optional[str] = field(default=None, init=False)
    _capacity_released: bool = field(default=False, init=False, repr=False)

    def __post_init__(self):
        """Post-initialization setup."""
        # Guard against callers explicitly passing metrics=None.
        if self.metrics is None:
            self.metrics = ExecutionMetrics()

        # A sentinel of 0 means "schedule the first poll one interval out".
        if self.next_poll_time == 0:
            self.next_poll_time = time.time() + self.current_poll_interval

    @property
    def age(self) -> float:
        """Age of the execution in seconds since submission."""
        return time.time() - self.metrics.submit_time

    @property
    def is_terminal(self) -> bool:
        """Whether the execution is in a terminal state."""
        return self.status in {
            ExecutionStatus.SUCCEEDED,
            ExecutionStatus.FAILED,
            ExecutionStatus.CANCELLED,
            ExecutionStatus.TIMEOUT,
        }

    @property
    def is_active(self) -> bool:
        """Whether the execution is actively running or queued."""
        return self.status in {
            ExecutionStatus.PENDING,
            ExecutionStatus.QUEUED,
            ExecutionStatus.RUNNING,
        }

    @property
    def is_successful(self) -> bool:
        """Whether the execution completed successfully with a result."""
        return self.status == ExecutionStatus.SUCCEEDED and self.result is not None

    @property
    def is_cancelled(self) -> bool:
        """Whether the execution has been cancelled."""
        return self._is_cancelled or self.status == ExecutionStatus.CANCELLED

    @property
    def should_poll(self) -> bool:
        """Whether this execution should be polled now."""
        return (
            self.is_active
            and not self.is_cancelled
            and time.time() >= self.next_poll_time
        )

    @property
    def is_overdue(self) -> bool:
        """Whether this execution has exceeded its timeout."""
        if self.timeout is None:
            return False
        return self.age > self.timeout

    def update_status(
        self, status: ExecutionStatus, error_message: Optional[str] = None
    ) -> None:
        """
        Update the execution status and timestamp.

        Args:
            status: New execution status
            error_message: Optional error message for failed/timed-out executions
        """
        self.status = status
        self.updated_at = datetime.now(timezone.utc)

        current_time = time.time()

        # Record the start time the first time the execution enters RUNNING.
        # (Previously only the QUEUED -> RUNNING transition was recorded, so
        # PENDING -> RUNNING left start_time unset.)
        if status == ExecutionStatus.RUNNING:
            if self.metrics.start_time is None:
                self.metrics.start_time = current_time
        elif self.is_terminal:
            self.metrics.end_time = current_time

        # Persist the message for failure-like terminal states. Previously
        # only FAILED stored it, silently dropping TIMEOUT messages.
        if error_message and status in {
            ExecutionStatus.FAILED,
            ExecutionStatus.TIMEOUT,
        }:
            self.error_message = error_message

    def set_result(self, result: Any) -> None:
        """
        Set the execution result and mark as completed.

        Args:
            result: The execution result
        """
        self.result = result
        self.update_status(ExecutionStatus.SUCCEEDED)

        # Calculate result size if possible (best-effort; shallow size only)
        try:
            import sys

            self.metrics.result_size_bytes = sys.getsizeof(result)
        except Exception:
            pass  # Size calculation is optional

    def set_error(
        self, error_message: str, error_details: Optional[Dict[str, Any]] = None
    ) -> None:
        """
        Set execution error and mark as failed.

        Args:
            error_message: Human-readable error message
            error_details: Optional detailed error information
        """
        self.error_message = error_message
        self.error_details = error_details
        self.update_status(ExecutionStatus.FAILED)

    def cancel(self, reason: Optional[str] = None) -> None:
        """
        Cancel the execution.

        Args:
            reason: Optional cancellation reason
        """
        self._is_cancelled = True
        self._cancellation_reason = reason
        self.update_status(ExecutionStatus.CANCELLED)

    def timeout_execution(self) -> None:
        """Mark the execution as timed out, recording the timeout message."""
        self.update_status(
            ExecutionStatus.TIMEOUT, f"Execution timed out after {self.timeout} seconds"
        )

    def update_poll_interval(self, new_interval: float) -> None:
        """
        Update the polling interval and next poll time.

        Args:
            new_interval: New polling interval in seconds
        """
        self.current_poll_interval = new_interval
        self.next_poll_time = time.time() + new_interval

    def record_poll_attempt(self, success: bool, duration: float = 0.0) -> None:
        """
        Record a polling attempt.

        Args:
            success: Whether the poll was successful
            duration: Duration of the poll request
        """
        self.metrics.add_poll(duration)

        if success:
            self.consecutive_failures = 0
        else:
            self.consecutive_failures += 1
            self.metrics.add_network_error()

    def record_retry(self) -> None:
        """Record a retry attempt."""
        self.metrics.add_retry()

    def to_dict(self) -> Dict[str, Any]:
        """
        Convert execution state to dictionary representation.

        Returns:
            Dictionary representation of the execution state
        """
        return {
            "execution_id": self.execution_id,
            "target": self.target,
            "status": self.status.value,
            "priority": self.priority.value,
            "created_at": self.created_at.isoformat(),
            "updated_at": self.updated_at.isoformat(),
            "age": self.age,
            "result": self.result,
            "error_message": self.error_message,
            "error_details": self.error_details,
            "workflow_id": self.workflow_id,
            "parent_execution_id": self.parent_execution_id,
            "session_id": self.session_id,
            "actor_id": self.actor_id,
            "timeout": self.timeout,
            "is_terminal": self.is_terminal,
            "is_active": self.is_active,
            "is_successful": self.is_successful,
            "is_cancelled": self.is_cancelled,
            "metrics": {
                "total_duration": self.metrics.total_duration,
                "execution_duration": self.metrics.execution_duration,
                "queue_duration": self.metrics.queue_duration,
                "poll_count": self.metrics.poll_count,
                "network_requests": self.metrics.network_requests,
                "network_errors": self.metrics.network_errors,
                "retry_count": self.metrics.retry_count,
                "result_size_bytes": self.metrics.result_size_bytes,
                "average_poll_interval": self.metrics.average_poll_interval,
            },
        }

    def __str__(self) -> str:
        """String representation of the execution state."""
        return (
            f"ExecutionState(id={self.execution_id[:8]}..., "
            f"target={self.target}, status={self.status.value}, "
            f"age={self.age:.1f}s, polls={self.metrics.poll_count})"
        )

    def __repr__(self) -> str:
        """Detailed string representation."""
        return (
            f"ExecutionState("
            f"execution_id='{self.execution_id}', "
            f"target='{self.target}', "
            f"status={self.status}, "
            f"age={self.age:.2f}, "
            f"polls={self.metrics.poll_count}, "
            f"interval={self.current_poll_interval}"
            f")"
        )
386
+
387
+
388
@dataclass
class ExecutionBatch:
    """
    Represents a batch of executions for efficient batch processing.
    """

    executions: List[ExecutionState] = field(default_factory=list)
    batch_id: str = field(default_factory=lambda: f"batch_{int(time.time() * 1000)}")
    created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))

    @property
    def size(self) -> int:
        """Number of executions in the batch."""
        return len(self.executions)

    @property
    def execution_ids(self) -> List[str]:
        """List of execution IDs in the batch."""
        return [exec_state.execution_id for exec_state in self.executions]

    @property
    def active_executions(self) -> List[ExecutionState]:
        """List of active (pending/queued/running) executions in the batch."""
        return [exec_state for exec_state in self.executions if exec_state.is_active]

    @property
    def completed_executions(self) -> List[ExecutionState]:
        """List of completed (terminal) executions in the batch."""
        return [exec_state for exec_state in self.executions if exec_state.is_terminal]

    def add_execution(self, execution: ExecutionState) -> None:
        """Add an execution to the batch (no-op if an equal entry is present)."""
        if execution not in self.executions:
            self.executions.append(execution)

    def remove_execution(self, execution_id: str) -> Optional[ExecutionState]:
        """Remove and return an execution from the batch, or None if absent."""
        for i, execution in enumerate(self.executions):
            if execution.execution_id == execution_id:
                return self.executions.pop(i)
        return None

    def get_execution(self, execution_id: str) -> Optional[ExecutionState]:
        """Get an execution by ID, or None if absent."""
        return next(
            (e for e in self.executions if e.execution_id == execution_id), None
        )

    def clear_completed(self) -> List[ExecutionState]:
        """
        Remove and return all completed (terminal) executions.

        All non-terminal executions are retained, including ones whose status
        is UNKNOWN. (Previously only *active* executions were kept, so
        UNKNOWN-status entries — neither active nor terminal — were silently
        dropped from the batch.)
        """
        completed = [e for e in self.executions if e.is_terminal]
        self.executions = [e for e in self.executions if not e.is_terminal]
        return completed

    def __len__(self) -> int:
        """Number of executions in the batch."""
        return len(self.executions)

    def __iter__(self):
        """Iterate over executions in the batch."""
        return iter(self.executions)

    def __str__(self) -> str:
        """String representation of the batch."""
        return f"ExecutionBatch(id={self.batch_id}, size={self.size}, active={len(self.active_executions)})"
+ return f"ExecutionBatch(id={self.batch_id}, size={self.size}, active={len(self.active_executions)})"