agentfield-0.1.22rc2-py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentfield/__init__.py +66 -0
- agentfield/agent.py +3569 -0
- agentfield/agent_ai.py +1125 -0
- agentfield/agent_cli.py +386 -0
- agentfield/agent_field_handler.py +494 -0
- agentfield/agent_mcp.py +534 -0
- agentfield/agent_registry.py +29 -0
- agentfield/agent_server.py +1185 -0
- agentfield/agent_utils.py +269 -0
- agentfield/agent_workflow.py +323 -0
- agentfield/async_config.py +278 -0
- agentfield/async_execution_manager.py +1227 -0
- agentfield/client.py +1447 -0
- agentfield/connection_manager.py +280 -0
- agentfield/decorators.py +527 -0
- agentfield/did_manager.py +337 -0
- agentfield/dynamic_skills.py +304 -0
- agentfield/execution_context.py +255 -0
- agentfield/execution_state.py +453 -0
- agentfield/http_connection_manager.py +429 -0
- agentfield/litellm_adapters.py +140 -0
- agentfield/logger.py +249 -0
- agentfield/mcp_client.py +204 -0
- agentfield/mcp_manager.py +340 -0
- agentfield/mcp_stdio_bridge.py +550 -0
- agentfield/memory.py +723 -0
- agentfield/memory_events.py +489 -0
- agentfield/multimodal.py +173 -0
- agentfield/multimodal_response.py +403 -0
- agentfield/pydantic_utils.py +227 -0
- agentfield/rate_limiter.py +280 -0
- agentfield/result_cache.py +441 -0
- agentfield/router.py +190 -0
- agentfield/status.py +70 -0
- agentfield/types.py +710 -0
- agentfield/utils.py +26 -0
- agentfield/vc_generator.py +464 -0
- agentfield/vision.py +198 -0
- agentfield-0.1.22rc2.dist-info/METADATA +102 -0
- agentfield-0.1.22rc2.dist-info/RECORD +42 -0
- agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
- agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
agentfield/async_config.py
@@ -0,0 +1,278 @@
"""
Async execution configuration for the AgentField SDK.

This module provides configuration classes for managing async execution behavior,
polling strategies, resource limits, and performance tuning parameters.
"""

import os
from dataclasses import dataclass


@dataclass
class AsyncConfig:
    """
    Configuration class for async execution behavior.

    This class defines all the parameters needed for efficient async execution,
    including polling intervals, resource limits, timeouts, and performance tuning.
    """

    # Polling Strategy Configuration
    initial_poll_interval: float = 0.03  # 30ms - aggressive initial polling
    fast_poll_interval: float = 0.08  # 80ms - for short executions (0-10s)
    medium_poll_interval: float = 0.4  # 400ms - for medium executions (10s-60s)
    slow_poll_interval: float = 1.5  # 1.5s - for long executions (60s+)
    max_poll_interval: float = 4.0  # 4s - maximum polling interval

    # Execution Duration Thresholds (in seconds)
    fast_execution_threshold: float = 10.0  # Switch to medium polling after 10s
    medium_execution_threshold: float = 60.0  # Switch to slow polling after 60s

    # Timeout Configuration
    max_execution_timeout: float = 21600.0  # 6 hours maximum execution time
    default_execution_timeout: float = 7200.0  # 2 hours default timeout
    polling_timeout: float = 20.0  # 20s timeout for individual poll requests

    # Resource Limits
    max_concurrent_executions: int = 4096  # Maximum concurrent executions to track
    max_active_polls: int = 512  # Maximum concurrent polling operations
    connection_pool_size: int = 64  # HTTP connection pool size
    connection_pool_per_host: int = 32  # Connections per host

    # Batch Processing
    batch_size: int = 100  # Maximum executions to check in single batch
    batch_poll_interval: float = 0.1  # 100ms - interval for batch polling

    # Caching Configuration
    result_cache_ttl: float = 600.0  # 10 minutes - cache completed results
    result_cache_max_size: int = 20000  # Maximum cached results

    # Memory Management
    cleanup_interval: float = 30.0  # 30 seconds - cleanup completed executions
    max_completed_executions: int = 4000  # Keep max 4000 completed executions
    completed_execution_retention_seconds: float = (
        600.0  # Retain completed executions for 10 minutes
    )

    # Retry and Backoff Configuration
    max_retry_attempts: int = 3  # Maximum retry attempts for failed polls
    retry_backoff_base: float = 1.0  # Base backoff time (seconds)
    retry_backoff_multiplier: float = 2.0  # Exponential backoff multiplier
    retry_backoff_max: float = 30.0  # Maximum backoff time
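The retry fields above describe a capped exponential backoff. The retry loop itself lives elsewhere in the SDK (the polling code is not in this file), so the following is only a sketch of the delay schedule these parameters imply, not the shipped implementation.

# Illustrative sketch only: delay schedule implied by the retry/backoff fields.
from agentfield.async_config import AsyncConfig

cfg = AsyncConfig()
for attempt in range(cfg.max_retry_attempts):
    delay = min(
        cfg.retry_backoff_base * (cfg.retry_backoff_multiplier ** attempt),
        cfg.retry_backoff_max,
    )
    print(f"poll retry {attempt + 1}: wait {delay:.1f}s")  # 1.0s, 2.0s, 4.0s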
    # Circuit Breaker Configuration
    circuit_breaker_failure_threshold: int = 5  # Failures before opening circuit
    circuit_breaker_recovery_timeout: float = 60.0  # Time before attempting recovery
    circuit_breaker_success_threshold: int = 3  # Successes needed to close circuit
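These three thresholds suggest a standard closed/open/half-open circuit breaker around polling calls. This file does not show how they are consumed, so the following is only an assumed sketch; SketchCircuitBreaker is a hypothetical helper, not an SDK class.

# Hypothetical sketch: how the three circuit-breaker thresholds could be used.
import time

from agentfield.async_config import AsyncConfig


class SketchCircuitBreaker:
    def __init__(self, cfg: AsyncConfig):
        self.cfg = cfg
        self.state = "closed"
        self.failures = 0
        self.successes = 0
        self.opened_at = 0.0

    def allow_request(self) -> bool:
        if self.state == "open":
            # After the recovery timeout, let one trial request through (half-open).
            if time.monotonic() - self.opened_at >= self.cfg.circuit_breaker_recovery_timeout:
                self.state = "half_open"
                return True
            return False
        return True

    def record_failure(self) -> None:
        self.failures += 1
        self.successes = 0
        if (
            self.failures >= self.cfg.circuit_breaker_failure_threshold
            or self.state == "half_open"
        ):
            self.state = "open"
            self.opened_at = time.monotonic()

    def record_success(self) -> None:
        self.failures = 0
        if self.state == "half_open":
            self.successes += 1
            if self.successes >= self.cfg.circuit_breaker_success_threshold:
                self.state = "closed"
                self.successes = 0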
    # Logging and Monitoring
    enable_performance_logging: bool = False  # Enable detailed performance logs
    enable_polling_metrics: bool = False  # Enable polling metrics collection
    log_slow_executions: bool = True  # Log executions exceeding threshold
    slow_execution_threshold: float = 30.0  # Threshold for slow execution logging

    # Feature Flags
    enable_async_execution: bool = True  # Master switch for async execution
    enable_batch_polling: bool = True  # Enable batch status checking
    enable_result_caching: bool = True  # Enable result caching
    enable_connection_pooling: bool = True  # Enable HTTP connection pooling
    fallback_to_sync: bool = True  # Fallback to sync if async fails

    # Event streaming (SSE) configuration
    enable_event_stream: bool = False  # Subscribe to SSE updates when available
    event_stream_path: str = "/api/ui/v1/executions/events"
    event_stream_retry_backoff: float = (
        3.0  # Seconds before reconnect after stream errors
    )
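Since AsyncConfig is a plain dataclass, any of the fields above can be overridden per instance via keyword arguments. A minimal usage sketch:

# Usage sketch: override a few defaults and sanity-check the result.
from agentfield.async_config import AsyncConfig

cfg = AsyncConfig(
    default_execution_timeout=1800.0,  # 30 minutes instead of 2 hours
    batch_size=50,
    enable_event_stream=True,
)
cfg.validate()  # raises ValueError if the combination is inconsistent
print(cfg)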
    @classmethod
    def from_environment(cls) -> "AsyncConfig":
        """
        Create AsyncConfig from environment variables.

        Environment variables use the prefix AGENTFIELD_ASYNC_ followed by the
        uppercase parameter name. For example:
        - AGENTFIELD_ASYNC_MAX_EXECUTION_TIMEOUT=1800
        - AGENTFIELD_ASYNC_BATCH_SIZE=50

        Returns:
            AsyncConfig instance with values from environment variables
        """
        config = cls()

        # Helper function to get env var with type conversion
        def get_env_var(name: str, default_value, converter=None):
            env_name = f"AGENTFIELD_ASYNC_{name.upper()}"
            value = os.getenv(env_name)
            if value is None:
                return default_value

            if converter:
                try:
                    return converter(value)
                except (ValueError, TypeError):
                    return default_value
            return value

        # Polling Configuration
        config.initial_poll_interval = get_env_var(
            "initial_poll_interval", config.initial_poll_interval, float
        )
        config.fast_poll_interval = get_env_var(
            "fast_poll_interval", config.fast_poll_interval, float
        )
        config.medium_poll_interval = get_env_var(
            "medium_poll_interval", config.medium_poll_interval, float
        )
        config.slow_poll_interval = get_env_var(
            "slow_poll_interval", config.slow_poll_interval, float
        )
        config.max_poll_interval = get_env_var(
            "max_poll_interval", config.max_poll_interval, float
        )

        # Timeout Configuration
        config.max_execution_timeout = get_env_var(
            "max_execution_timeout", config.max_execution_timeout, float
        )
        config.default_execution_timeout = get_env_var(
            "default_execution_timeout", config.default_execution_timeout, float
        )
        config.polling_timeout = get_env_var(
            "polling_timeout", config.polling_timeout, float
        )

        # Resource Limits
        config.max_concurrent_executions = get_env_var(
            "max_concurrent_executions", config.max_concurrent_executions, int
        )
        config.max_active_polls = get_env_var(
            "max_active_polls", config.max_active_polls, int
        )
        config.connection_pool_size = get_env_var(
            "connection_pool_size", config.connection_pool_size, int
        )
        config.batch_size = get_env_var("batch_size", config.batch_size, int)

        # Feature Flags
        config.enable_async_execution = get_env_var(
            "enable_async_execution",
            config.enable_async_execution,
            lambda x: x.lower() == "true",
        )
        config.enable_batch_polling = get_env_var(
            "enable_batch_polling",
            config.enable_batch_polling,
            lambda x: x.lower() == "true",
        )
        config.enable_result_caching = get_env_var(
            "enable_result_caching",
            config.enable_result_caching,
            lambda x: x.lower() == "true",
        )
        config.fallback_to_sync = get_env_var(
            "fallback_to_sync", config.fallback_to_sync, lambda x: x.lower() == "true"
        )
        config.enable_event_stream = get_env_var(
            "enable_event_stream",
            config.enable_event_stream,
            lambda x: x.lower() == "true",
        )
        config.event_stream_path = get_env_var(
            "event_stream_path", config.event_stream_path
        )
        config.event_stream_retry_backoff = get_env_var(
            "event_stream_retry_backoff", config.event_stream_retry_backoff, float
        )

        config.completed_execution_retention_seconds = get_env_var(
            "completed_execution_retention_seconds",
            config.completed_execution_retention_seconds,
            float,
        )

        return config
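A usage sketch of the environment-driven construction described in the docstring above. Note that boolean flags are parsed by comparing the lowercased value against the literal string "true"; anything else falls back to False.

# Usage sketch: configure async behavior via environment variables.
import os

from agentfield.async_config import AsyncConfig

os.environ["AGENTFIELD_ASYNC_MAX_EXECUTION_TIMEOUT"] = "1800"  # seconds
os.environ["AGENTFIELD_ASYNC_BATCH_SIZE"] = "50"
os.environ["AGENTFIELD_ASYNC_ENABLE_EVENT_STREAM"] = "true"

cfg = AsyncConfig.from_environment()
assert cfg.max_execution_timeout == 1800.0
assert cfg.batch_size == 50
assert cfg.enable_event_stream is True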
    def validate(self) -> None:
        """
        Validate configuration parameters.

        Raises:
            ValueError: If any configuration parameter is invalid
        """
        if self.initial_poll_interval <= 0:
            raise ValueError("initial_poll_interval must be positive")

        if self.max_execution_timeout <= 0:
            raise ValueError("max_execution_timeout must be positive")

        if self.default_execution_timeout <= 0:
            raise ValueError("default_execution_timeout must be positive")

        if self.default_execution_timeout > self.max_execution_timeout:
            raise ValueError(
                "default_execution_timeout cannot exceed max_execution_timeout"
            )

        if self.max_concurrent_executions <= 0:
            raise ValueError("max_concurrent_executions must be positive")

        if self.batch_size <= 0:
            raise ValueError("batch_size must be positive")

        if self.connection_pool_size <= 0:
            raise ValueError("connection_pool_size must be positive")

        # Ensure polling intervals are in logical order
        if not (
            self.initial_poll_interval
            <= self.fast_poll_interval
            <= self.medium_poll_interval
            <= self.slow_poll_interval
            <= self.max_poll_interval
        ):
            raise ValueError("Polling intervals must be in ascending order")

        # Ensure thresholds are logical
        if self.fast_execution_threshold >= self.medium_execution_threshold:
            raise ValueError(
                "fast_execution_threshold must be less than medium_execution_threshold"
            )

        if self.completed_execution_retention_seconds < 0:
            raise ValueError("completed_execution_retention_seconds cannot be negative")
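For example, validate() rejects a default timeout larger than the maximum timeout (a minimal sketch):

# Usage sketch: validate() rejects inconsistent combinations of fields.
from agentfield.async_config import AsyncConfig

bad = AsyncConfig(default_execution_timeout=10.0, max_execution_timeout=5.0)
try:
    bad.validate()
except ValueError as exc:
    print(exc)  # default_execution_timeout cannot exceed max_execution_timeout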
    def get_poll_interval_for_age(self, execution_age: float) -> float:
        """
        Get the appropriate polling interval based on execution age.

        Args:
            execution_age: Age of the execution in seconds

        Returns:
            Appropriate polling interval in seconds
        """
        if execution_age < self.fast_execution_threshold:
            return self.fast_poll_interval
        elif execution_age < self.medium_execution_threshold:
            return self.medium_poll_interval
        else:
            return self.slow_poll_interval
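With the defaults this returns 0.08 s polls for the first 10 seconds, 0.4 s up to 60 seconds, and 1.5 s afterwards. Below is a sketch of an adaptive polling loop built on it, assuming a hypothetical check_status coroutine standing in for the SDK's real status call:

# Sketch of an adaptive polling loop; check_status() is hypothetical, not an SDK API.
import asyncio
import time

from agentfield.async_config import AsyncConfig


async def check_status(execution_id: str) -> dict:
    # Hypothetical stand-in for the SDK's real status endpoint.
    return {"done": True, "result": 42}


async def wait_for_result(execution_id: str, cfg: AsyncConfig):
    started = time.monotonic()
    while True:
        age = time.monotonic() - started
        if age > cfg.default_execution_timeout:
            raise TimeoutError(f"execution {execution_id} timed out")
        status = await check_status(execution_id)
        if status.get("done"):
            return status.get("result")
        await asyncio.sleep(cfg.get_poll_interval_for_age(age))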
    def __str__(self) -> str:
        """String representation of the configuration."""
        return (
            f"AsyncConfig("
            f"polling={self.initial_poll_interval}->{self.max_poll_interval}s, "
            f"timeout={self.max_execution_timeout}s, "
            f"max_concurrent={self.max_concurrent_executions}, "
            f"batch_size={self.batch_size}, "
            f"async_enabled={self.enable_async_execution}, "
            f"event_stream={self.enable_event_stream}"
            f")"
        )


# Global default configuration instance
DEFAULT_ASYNC_CONFIG = AsyncConfig()
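The shared default instance can be imported and printed directly; with the defaults above, its summary string comes out as shown in the comment (usage sketch):

# Usage sketch: the module-level default instance and its summary string.
from agentfield.async_config import DEFAULT_ASYNC_CONFIG

print(DEFAULT_ASYNC_CONFIG)
# AsyncConfig(polling=0.03->4.0s, timeout=21600.0s, max_concurrent=4096,
#             batch_size=100, async_enabled=True, event_stream=False)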