agentfield-0.1.22rc2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. agentfield/__init__.py +66 -0
  2. agentfield/agent.py +3569 -0
  3. agentfield/agent_ai.py +1125 -0
  4. agentfield/agent_cli.py +386 -0
  5. agentfield/agent_field_handler.py +494 -0
  6. agentfield/agent_mcp.py +534 -0
  7. agentfield/agent_registry.py +29 -0
  8. agentfield/agent_server.py +1185 -0
  9. agentfield/agent_utils.py +269 -0
  10. agentfield/agent_workflow.py +323 -0
  11. agentfield/async_config.py +278 -0
  12. agentfield/async_execution_manager.py +1227 -0
  13. agentfield/client.py +1447 -0
  14. agentfield/connection_manager.py +280 -0
  15. agentfield/decorators.py +527 -0
  16. agentfield/did_manager.py +337 -0
  17. agentfield/dynamic_skills.py +304 -0
  18. agentfield/execution_context.py +255 -0
  19. agentfield/execution_state.py +453 -0
  20. agentfield/http_connection_manager.py +429 -0
  21. agentfield/litellm_adapters.py +140 -0
  22. agentfield/logger.py +249 -0
  23. agentfield/mcp_client.py +204 -0
  24. agentfield/mcp_manager.py +340 -0
  25. agentfield/mcp_stdio_bridge.py +550 -0
  26. agentfield/memory.py +723 -0
  27. agentfield/memory_events.py +489 -0
  28. agentfield/multimodal.py +173 -0
  29. agentfield/multimodal_response.py +403 -0
  30. agentfield/pydantic_utils.py +227 -0
  31. agentfield/rate_limiter.py +280 -0
  32. agentfield/result_cache.py +441 -0
  33. agentfield/router.py +190 -0
  34. agentfield/status.py +70 -0
  35. agentfield/types.py +710 -0
  36. agentfield/utils.py +26 -0
  37. agentfield/vc_generator.py +464 -0
  38. agentfield/vision.py +198 -0
  39. agentfield-0.1.22rc2.dist-info/METADATA +102 -0
  40. agentfield-0.1.22rc2.dist-info/RECORD +42 -0
  41. agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
  42. agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
agentfield/types.py ADDED
@@ -0,0 +1,710 @@
+ from dataclasses import asdict, dataclass, field
+ from typing import Any, Dict, List, Literal, Optional
+ from pydantic import BaseModel, Field
+ from enum import Enum
+
+
+ class AgentStatus(str, Enum):
+     """Agent lifecycle status enum matching the Go backend"""
+
+     STARTING = "starting"
+     READY = "ready"
+     DEGRADED = "degraded"
+     OFFLINE = "offline"
+
+
+ @dataclass
+ class MCPServerHealth:
+     """MCP server health information for heartbeat reporting"""
+
+     alias: str
+     status: str
+     tool_count: int = 0
+     port: Optional[int] = None
+     process_id: Optional[int] = None
+     started_at: Optional[str] = None
+     last_health_check: Optional[str] = None
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+
+ @dataclass
+ class HeartbeatData:
+     """Enhanced heartbeat data with status and MCP information"""
+
+     status: AgentStatus
+     mcp_servers: List[MCPServerHealth]
+     timestamp: str
+
+     def to_dict(self) -> Dict[str, Any]:
+         return {
+             "status": self.status.value,
+             "mcp_servers": [server.to_dict() for server in self.mcp_servers],
+             "timestamp": self.timestamp,
+         }
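
For orientation, a minimal sketch of how these heartbeat types compose into a payload (the alias, status string, and timestamp value are invented for illustration):

    from datetime import datetime, timezone

    # Hypothetical usage: report one healthy MCP server in a heartbeat.
    server = MCPServerHealth(alias="search", status="healthy", tool_count=3)
    beat = HeartbeatData(
        status=AgentStatus.READY,
        mcp_servers=[server],
        timestamp=datetime.now(timezone.utc).isoformat(),
    )
    beat.to_dict()
    # {'status': 'ready', 'mcp_servers': [{'alias': 'search', ...}], 'timestamp': '...'}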
+
+
+ @dataclass
+ class MemoryConfig:
+     auto_inject: List[str]
+     memory_retention: str
+     cache_results: bool
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+
+ @dataclass
+ class ReasonerDefinition:
+     id: str
+     input_schema: Dict[str, Any]
+     output_schema: Dict[str, Any]
+     memory_config: Optional[MemoryConfig] = None  # Optional for now, can be added later
+
+     def to_dict(self) -> Dict[str, Any]:
+         data = asdict(self)
+         if self.memory_config is not None:
+             data["memory_config"] = self.memory_config.to_dict()
+         return data
+
+
+ @dataclass
+ class SkillDefinition:
+     id: str
+     input_schema: Dict[str, Any]
+     tags: List[str]
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+
+ @dataclass
+ class ExecutionHeaders:
+     """
+     Simple helper for constructing execution headers when initiating AgentField calls.
+
+     This replaces the wide workflow context structure with the minimal information
+     required by the run-based execution pipeline.
+     """
+
+     run_id: str
+     session_id: Optional[str] = None
+     actor_id: Optional[str] = None
+     parent_execution_id: Optional[str] = None
+
+     def to_headers(self) -> Dict[str, str]:
+         headers = {"X-Run-ID": self.run_id}
+         if self.parent_execution_id:
+             headers["X-Parent-Execution-ID"] = self.parent_execution_id
+         if self.session_id:
+             headers["X-Session-ID"] = self.session_id
+         if self.actor_id:
+             headers["X-Actor-ID"] = self.actor_id
+         return headers
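
A quick illustration of the resulting header mapping (IDs are placeholders); only the run ID is mandatory, and unset optional headers are omitted entirely rather than sent empty:

    headers = ExecutionHeaders(
        run_id="run-123",
        session_id="sess-9",
        parent_execution_id="exec-7",
    ).to_headers()
    # {'X-Run-ID': 'run-123', 'X-Parent-Execution-ID': 'exec-7', 'X-Session-ID': 'sess-9'}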
+
+
+ @dataclass
+ class WebhookConfig:
+     """Webhook registration details for async executions."""
+
+     url: str
+     secret: Optional[str] = None
+     headers: Optional[Dict[str, str]] = None
+
+     def to_payload(self) -> Dict[str, Any]:
+         payload: Dict[str, Any] = {"url": self.url}
+         if self.secret:
+             payload["secret"] = self.secret
+         if self.headers:
+             payload["headers"] = self.headers
+         return payload
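
to_payload drops unset fields in the same way; a sketch with placeholder values:

    hook = WebhookConfig(url="https://example.com/callback", secret="s3cret")
    hook.to_payload()
    # {'url': 'https://example.com/callback', 'secret': 's3cret'}  (no 'headers' key)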
+
+
+ # -----------------------------------------------------------------------------
+ # Discovery API Models
+ # -----------------------------------------------------------------------------
+
+
+ @dataclass
+ class DiscoveryPagination:
+     limit: int
+     offset: int
+     has_more: bool
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "DiscoveryPagination":
+         return cls(
+             limit=int(data.get("limit", 0)),
+             offset=int(data.get("offset", 0)),
+             has_more=bool(data.get("has_more", False)),
+         )
+
+
+ @dataclass
+ class ReasonerCapability:
+     id: str
+     description: Optional[str]
+     tags: List[str]
+     input_schema: Optional[Dict[str, Any]]
+     output_schema: Optional[Dict[str, Any]]
+     examples: Optional[List[Dict[str, Any]]]
+     invocation_target: str
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "ReasonerCapability":
+         return cls(
+             id=data.get("id", ""),
+             description=data.get("description"),
+             tags=list(data.get("tags") or []),
+             input_schema=data.get("input_schema"),
+             output_schema=data.get("output_schema"),
+             examples=[dict(x) for x in data.get("examples") or []] or None,
+             invocation_target=data.get("invocation_target", ""),
+         )
+
+
+ @dataclass
+ class SkillCapability:
+     id: str
+     description: Optional[str]
+     tags: List[str]
+     input_schema: Optional[Dict[str, Any]]
+     invocation_target: str
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "SkillCapability":
+         return cls(
+             id=data.get("id", ""),
+             description=data.get("description"),
+             tags=list(data.get("tags") or []),
+             input_schema=data.get("input_schema"),
+             invocation_target=data.get("invocation_target", ""),
+         )
+
+
+ @dataclass
+ class AgentCapability:
+     agent_id: str
+     base_url: str
+     version: str
+     health_status: str
+     deployment_type: str
+     last_heartbeat: str
+     reasoners: List[ReasonerCapability] = field(default_factory=list)
+     skills: List[SkillCapability] = field(default_factory=list)
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "AgentCapability":
+         return cls(
+             agent_id=data.get("agent_id", ""),
+             base_url=data.get("base_url", ""),
+             version=data.get("version", ""),
+             health_status=data.get("health_status", ""),
+             deployment_type=data.get("deployment_type", ""),
+             last_heartbeat=data.get("last_heartbeat", ""),
+             reasoners=[
+                 ReasonerCapability.from_dict(r) for r in data.get("reasoners") or []
+             ],
+             skills=[SkillCapability.from_dict(s) for s in data.get("skills") or []],
+         )
+
+
+ @dataclass
+ class DiscoveryResponse:
+     discovered_at: str
+     total_agents: int
+     total_reasoners: int
+     total_skills: int
+     pagination: DiscoveryPagination
+     capabilities: List[AgentCapability]
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "DiscoveryResponse":
+         return cls(
+             discovered_at=str(data.get("discovered_at", "")),
+             total_agents=int(data.get("total_agents", 0)),
+             total_reasoners=int(data.get("total_reasoners", 0)),
+             total_skills=int(data.get("total_skills", 0)),
+             pagination=DiscoveryPagination.from_dict(data.get("pagination") or {}),
+             capabilities=[
+                 AgentCapability.from_dict(cap)
+                 for cap in data.get("capabilities") or []
+             ],
+         )
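
To see how the nested from_dict constructors compose, here is an invented discovery payload being parsed; every absent field falls back to an empty default rather than raising:

    payload = {
        "discovered_at": "2024-01-01T00:00:00Z",
        "total_agents": 1,
        "total_reasoners": 1,
        "total_skills": 0,
        "pagination": {"limit": 50, "offset": 0, "has_more": False},
        "capabilities": [{
            "agent_id": "agent-a",
            "base_url": "http://localhost:8080",
            "reasoners": [{"id": "summarize", "tags": ["nlp"]}],
        }],
    }
    resp = DiscoveryResponse.from_dict(payload)
    assert resp.capabilities[0].reasoners[0].id == "summarize"
    assert resp.capabilities[0].version == ""  # missing key, empty default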
+
+
+ @dataclass
+ class CompactCapability:
+     id: str
+     agent_id: str
+     target: str
+     tags: List[str]
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "CompactCapability":
+         return cls(
+             id=data.get("id", ""),
+             agent_id=data.get("agent_id", ""),
+             target=data.get("target", ""),
+             tags=list(data.get("tags") or []),
+         )
+
+
+ @dataclass
+ class CompactDiscoveryResponse:
+     discovered_at: str
+     reasoners: List[CompactCapability]
+     skills: List[CompactCapability]
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "CompactDiscoveryResponse":
+         return cls(
+             discovered_at=str(data.get("discovered_at", "")),
+             reasoners=[
+                 CompactCapability.from_dict(r) for r in data.get("reasoners") or []
+             ],
+             skills=[CompactCapability.from_dict(s) for s in data.get("skills") or []],
+         )
+
+
+ @dataclass
+ class DiscoveryResult:
+     format: str
+     raw: str
+     json: Optional[DiscoveryResponse] = None
+     compact: Optional[CompactDiscoveryResponse] = None
+     xml: Optional[str] = None
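
DiscoveryResult reads as a tagged union over the three wire formats; a consumer might branch on format as in this sketch (it assumes the format values are "json", "compact", and "xml", which the dataclass itself does not enforce):

    def reasoner_count(result: DiscoveryResult) -> int:
        # Hypothetical helper: count reasoners regardless of response format.
        if result.format == "json" and result.json is not None:
            return result.json.total_reasoners
        if result.format == "compact" and result.compact is not None:
            return len(result.compact.reasoners)
        return 0  # xml or unparsed: only result.raw is available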
+
+
+ class AIConfig(BaseModel):
+     """
+     Configuration for AI calls, defining default models, temperatures, and other parameters.
+     These settings can be overridden at the method call level.
+
+     Leverages LiteLLM's standard environment variable handling for API keys:
+     - OPENAI_API_KEY, ANTHROPIC_API_KEY, AZURE_OPENAI_API_KEY, etc.
+     - LiteLLM automatically detects and uses these standard environment variables
+
+     All fields have sensible defaults, so you can create an AIConfig with minimal configuration:
+
+     Examples:
+         # Minimal configuration - uses all defaults
+         AIConfig()
+
+         # Override just the API key
+         AIConfig(api_key="your-key")
+
+         # Override specific models for multimodal tasks
+         AIConfig(audio_model="tts-1-hd", vision_model="dall-e-3")
+     """
+
+     model: str = Field(
+         default="gpt-4o",
+         description="Default LLM model to use (e.g., 'gpt-4o', 'claude-3-sonnet').",
+     )
+     temperature: Optional[float] = Field(
+         default=None,
+         ge=0.0,
+         le=2.0,
+         description="Creativity level (0.0-2.0). If None, uses model's default.",
+     )
+     max_tokens: Optional[int] = Field(
+         default=None,
+         description="Maximum response length. If None, uses model's default.",
+     )
+     top_p: Optional[float] = Field(
+         default=None,
+         ge=0.0,
+         le=1.0,
+         description="Controls diversity via nucleus sampling. If None, uses model's default.",
+     )
+     stream: Optional[bool] = Field(
+         default=None,
+         description="Enable streaming response. If None, uses model's default.",
+     )
+     response_format: Literal["auto", "json", "text"] = Field(
+         default="auto", description="Desired response format."
+     )
+
+     # Multimodal settings - updated with better defaults for TTS
+     vision_model: str = Field(
+         default="dall-e-3", description="Model for vision/image generation tasks."
+     )
+     audio_model: str = Field(
+         default="tts-1",
+         description="Model for audio generation (tts-1, tts-1-hd, gpt-4o-mini-tts).",
+     )
+     image_quality: Literal["low", "high"] = Field(
+         default="high", description="Quality for image generation/processing."
+     )
+     audio_format: str = Field(
+         default="wav", description="Default format for audio output (wav, mp3)."
+     )
+
+     # Behavior settings
+     timeout: Optional[int] = Field(
+         default=None,
+         description="Timeout for AI calls in seconds. If None, uses LiteLLM's default.",
+     )
+     retry_attempts: Optional[int] = Field(
+         default=None,
+         description="Number of retry attempts for failed AI calls. If None, uses LiteLLM's default.",
+     )
+     retry_delay: float = Field(
+         default=1.0, description="Delay between retries in seconds."
+     )
+
+     # Rate limiting configuration
+     rate_limit_max_retries: int = Field(
+         default=20,
+         description="Maximum number of retries for rate limit errors (allows up to ~20 minutes of retries).",
+     )
+     rate_limit_base_delay: float = Field(
+         default=1.0,
+         description="Base delay for rate limit exponential backoff in seconds.",
+     )
+     rate_limit_max_delay: float = Field(
+         default=300.0,
+         description="Maximum delay for rate limit backoff in seconds (5 minutes).",
+     )
+     rate_limit_jitter_factor: float = Field(
+         default=0.25,
+         description="Jitter factor for rate limit backoff (±25% randomization).",
+     )
+     rate_limit_circuit_breaker_threshold: int = Field(
+         default=10,
+         description="Number of consecutive rate limit failures before opening circuit breaker.",
+     )
+     rate_limit_circuit_breaker_timeout: int = Field(
+         default=300, description="Circuit breaker timeout in seconds (5 minutes)."
+     )
+     enable_rate_limit_retry: bool = Field(
+         default=True, description="Enable automatic retry for rate limit errors."
+     )
+
+     # Cost controls
+     max_cost_per_call: Optional[float] = Field(
+         default=None, description="Maximum cost per AI call in USD."
+     )
+     daily_budget: Optional[float] = Field(
+         default=None, description="Daily budget for AI calls in USD."
+     )
+
+     # Memory integration (defaults for auto-injection)
+     auto_inject_memory: List[str] = Field(
+         default_factory=list,
+         description="List of memory scopes to auto-inject (e.g., ['workflow', 'session']).",
+     )
+     preserve_context: bool = Field(
+         default=True,
+         description="Whether to preserve conversation context across calls.",
+     )
+     context_window: int = Field(
+         default=10, description="Number of previous messages to include in context."
+     )
+
+     # LiteLLM configuration - these get passed directly to litellm.completion()
+     api_key: Optional[str] = Field(
+         default=None, description="API key override (if not using env vars)"
+     )
+     api_base: Optional[str] = Field(default=None, description="Custom API base URL")
+     api_version: Optional[str] = Field(
+         default=None, description="API version (for Azure)"
+     )
+     organization: Optional[str] = Field(
+         default=None, description="Organization ID (for OpenAI)"
+     )
+
+     # Additional LiteLLM parameters that can be overridden
+     litellm_params: Dict[str, Any] = Field(
+         default_factory=dict, description="Additional parameters to pass to LiteLLM"
+     )
+     fallback_models: List[str] = Field(
+         default_factory=list,
+         description="List of models to fallback to if primary fails.",
+     )
+
+     # Model limits caching for optimization
+     model_limits_cache: Dict[str, Dict[str, Any]] = Field(
+         default_factory=dict,
+         description="Cached model limits to avoid repeated API calls",
+     )
+     avg_chars_per_token: int = Field(
+         default=4, description="Average characters per token for approximation"
+     )
+     max_input_tokens: Optional[int] = Field(
+         default=None,
+         description="Maximum input context tokens (overrides auto-detection)",
+     )
+
+     # Pydantic V2: allow fields that start with `model_`
+     model_config = {"protected_namespaces": ()}
+
+     # Fallback model context mappings for when LiteLLM detection fails
+     _MODEL_CONTEXT_LIMITS = {
+         # OpenRouter Gemini models
+         "openrouter/google/gemini-2.5-flash-lite": 1048576,  # 1M tokens
+         "openrouter/google/gemini-2.5-flash": 1048576,  # 1M tokens
+         "openrouter/google/gemini-2.5-pro": 2097152,  # 2M tokens
+         "openrouter/google/gemini-1.5-pro": 2097152,  # 2M tokens
+         "openrouter/google/gemini-1.5-flash": 1048576,  # 1M tokens
+         # Direct Gemini models
+         "gemini-2.5-flash": 1048576,
+         "gemini-2.5-pro": 2097152,
+         "gemini-1.5-pro": 2097152,
+         "gemini-1.5-flash": 1048576,
+         # OpenAI models
+         "openrouter/openai/gpt-4.1-mini": 128000,
+         "openrouter/openai/gpt-4o": 128000,
+         "openrouter/openai/gpt-4o-mini": 128000,
+         "gpt-4o": 128000,
+         "gpt-4o-mini": 128000,
+         "gpt-4": 8192,
+         "gpt-3.5-turbo": 16385,
+         # Claude models
+         "openrouter/anthropic/claude-3.5-sonnet": 200000,
+         "openrouter/anthropic/claude-3-opus": 200000,
+         "claude-3.5-sonnet": 200000,
+         "claude-3-opus": 200000,
+     }
+
+     async def get_model_limits(self, model: Optional[str] = None) -> Dict[str, Any]:
+         """
+         Fetch and cache model limits to avoid repeated API calls.
+
+         Args:
+             model: Model to get limits for (defaults to self.model)
+
+         Returns:
+             Dict containing context_length and max_output_tokens
+         """
+         target_model = model or self.model
+
+         # Return cached limits if available
+         if target_model in self.model_limits_cache:
+             return self.model_limits_cache[target_model]
+
+         fallback_context = self._MODEL_CONTEXT_LIMITS.get(target_model)
+
+         try:
+             import litellm
+
+             # Fetch model info once and cache it
+             info = litellm.get_model_info(target_model)
+
+         except Exception:
+             info = None  # Lookup failed; fall back to the static limits below
+
+         if info is not None:
+             context_length = (
+                 getattr(info, "max_tokens", None) or fallback_context or 131072
+             )
+             max_output = getattr(info, "max_output_tokens", None) or getattr(
+                 info, "max_completion_tokens", None
+             )
+         else:
+             context_length = fallback_context or 8192
+             max_output = None
+
+         if not max_output:
+             # Default to a conservative completion window capped at 32K
+             max_output = min(32768, max(2048, context_length // 4))
+
+         limits = {
+             "context_length": context_length,
+             "max_output_tokens": max_output,
+         }
+
+         self.model_limits_cache[target_model] = limits
+         return limits
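
A sketch of using the limits lookup (it is async, so it must run inside an event loop); the exact numbers depend on what litellm.get_model_info reports, with the static table above as the fallback:

    import asyncio

    async def main() -> None:
        config = AIConfig(model="gpt-4o")
        limits = await config.get_model_limits()
        print(limits["context_length"], limits["max_output_tokens"])
        # A second call returns immediately from config.model_limits_cache.
        await config.get_model_limits()

    asyncio.run(main())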
+
+     def trim_by_chars(self, text: str, limit: int, head_ratio: float = 0.2) -> str:
+         """
+         Trim text by character count using head/tail ratio to preserve important content.
+
+         Args:
+             text: Text to trim
+             limit: Character limit
+             head_ratio: Ratio of content to keep from the beginning (0.0-1.0)
+
+         Returns:
+             Trimmed text with head and tail preserved
+         """
+         if len(text) <= limit:
+             return text
+
+         head_chars = int(limit * head_ratio)
+         tail_chars = int(limit * (1 - head_ratio))
+
+         head = text[:head_chars]
+         tail = text[-tail_chars:]
+
+         return head + "\n…TRIMMED…\n" + tail
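
With the default head_ratio of 0.2 and a limit of 100, the first 20 and last 80 characters survive, biasing the budget toward the tail; for illustration:

    config = AIConfig()
    text = "A" * 500 + "Z" * 500
    trimmed = config.trim_by_chars(text, limit=100)
    # Keeps text[:20] and text[-80:], joined by the "…TRIMMED…" marker.
    assert trimmed.startswith("A" * 20) and trimmed.endswith("Z" * 80)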
+
+     def get_safe_prompt_chars(
+         self, model: Optional[str] = None, max_output_tokens: Optional[int] = None
+     ) -> int:
+         """
+         Calculate safe character limit for prompts based on cached model limits.
+
+         Args:
+             model: Model to calculate for (defaults to self.model)
+             max_output_tokens: Override for max output tokens
+
+         Returns:
+             Safe character limit for prompts
+         """
+         # This is a synchronous method that uses cached limits
+         target_model = model or self.model
+
+         # Use cached limits if available, otherwise use conservative defaults
+         if target_model in self.model_limits_cache:
+             limits = self.model_limits_cache[target_model]
+             max_ctx = limits["context_length"]
+             max_out = max_output_tokens or limits["max_output_tokens"] or 0
+         else:
+             # Conservative defaults if not cached yet
+             max_ctx = 8192
+             max_out = max_output_tokens or 4096
+
+         # Calculate safe prompt character limit
+         safe_prompt_chars = (max_ctx - max_out) * self.avg_chars_per_token
+         return max(safe_prompt_chars, 1000)  # Ensure minimum viable prompt size
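
For an uncached model the arithmetic is fixed: (8192 - 4096) context-minus-output tokens times 4 characters per token gives 16384 characters:

    config = AIConfig()  # avg_chars_per_token defaults to 4
    # The model name is arbitrary here; it just has to miss the cache.
    assert config.get_safe_prompt_chars(model="uncached-model") == (8192 - 4096) * 4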
+
+     def get_litellm_params(
+         self, messages: Optional[List[Dict]] = None, **overrides
+     ) -> Dict[str, Any]:
+         """
+         Get parameters formatted for LiteLLM, with runtime overrides and smart token management.
+         LiteLLM handles environment variable detection automatically.
+         """
+         params = {
+             "model": self.model,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+             "top_p": self.top_p,
+             "stream": self.stream,
+             "timeout": self.timeout,
+             "num_retries": self.retry_attempts,
+         }
+
+         # Add optional parameters if set
+         if self.api_key:
+             params["api_key"] = self.api_key
+         if self.api_base:
+             params["api_base"] = self.api_base
+         if self.api_version:
+             params["api_version"] = self.api_version
+         if self.organization:
+             params["organization"] = self.organization
+
+         # Add response format if not auto
+         if self.response_format != "auto":
+             params["response_format"] = {"type": self.response_format}
+
+         # Add any additional litellm params
+         params.update(self.litellm_params)
+
+         # Apply runtime overrides (highest priority)
+         params.update(overrides)
+
+         # Remove None values
+         params = {k: v for k, v in params.items() if v is not None}
+
+         # OpenAI Responses API expects max_completion_tokens instead of max_tokens
+         model_name = params.get("model") or self.model
+         provider = (
+             model_name.split("/", 1)[0] if model_name and "/" in model_name else None
+         )
+         if provider == "openai" and "max_tokens" in params:
+             params["max_completion_tokens"] = params.pop("max_tokens")
+
+         return params
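
Precedence runs config fields, then litellm_params, then runtime overrides; the openai/ provider prefix also triggers the max_tokens rename. An illustrative call:

    config = AIConfig(model="openai/gpt-4o", temperature=0.2, max_tokens=256)
    params = config.get_litellm_params(temperature=0.7)
    assert params["temperature"] == 0.7            # runtime override wins
    assert params["max_completion_tokens"] == 256  # renamed for the openai/ prefix
    assert "max_tokens" not in params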
+
+     def copy(
+         self,
+         *,
+         include: Optional[Any] = None,
+         exclude: Optional[Any] = None,
+         update: Optional[Dict[str, Any]] = None,
+         deep: bool = False,
+     ) -> "AIConfig":
+         """Create a copy of the configuration"""
+         return super().copy(include=include, exclude=exclude, update=update, deep=deep)
+
+     def to_dict(self) -> Dict[str, Any]:
+         """Convert to dictionary representation"""
+         return self.model_dump()
+
+     @classmethod
+     def from_env(cls, **overrides) -> "AIConfig":
+         """
+         Create AIConfig with smart defaults, letting LiteLLM handle env vars.
+         This is the recommended way to create configs in production.
+         """
+         config = cls(**overrides)
+         return config
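
As the body shows, from_env is just the constructor plus the convention that credentials live in the environment; a sketch (the key value is a placeholder):

    import os

    os.environ.setdefault("OPENAI_API_KEY", "sk-placeholder")  # read by LiteLLM, not stored on AIConfig
    config = AIConfig.from_env(model="gpt-4o-mini", temperature=0.3)
    variant = config.copy(update={"stream": True})  # per-call tweak without mutating the base config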
+
+
+ @dataclass
+ class MemoryValue:
+     """Represents a memory value stored in the AgentField system."""
+
+     key: str
+     data: Any
+     scope: str
+     scope_id: str
+     created_at: str
+     updated_at: str
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "MemoryValue":
+         return cls(**data)
+
+
+ @dataclass
+ class MemoryChangeEvent:
+     """Represents a memory change event for reactive programming."""
+
+     id: Optional[str] = None
+     type: Optional[str] = None
+     timestamp: Optional[str] = None
+     scope: str = ""
+     scope_id: str = ""
+     key: str = ""
+     action: str = ""
+     data: Optional[Any] = None
+     previous_data: Optional[Any] = None
+     metadata: Dict[str, Any] = field(default_factory=dict)
+
+     def to_dict(self) -> Dict[str, Any]:
+         return asdict(self)
+
+     @property
+     def new_value(self) -> Optional[Any]:
+         """Backward compatibility alias for data."""
+         return self.data
+
+     @property
+     def old_value(self) -> Optional[Any]:
+         """Backward compatibility alias for previous_data."""
+         return self.previous_data
+
+     @classmethod
+     def from_dict(cls, data: Dict[str, Any]) -> "MemoryChangeEvent":
+         return cls(
+             id=data.get("id"),
+             type=data.get("type"),
+             timestamp=data.get("timestamp"),
+             scope=data.get("scope", ""),
+             scope_id=data.get("scope_id", ""),
+             key=data.get("key", ""),
+             action=data.get("action", ""),
+             data=data.get("data"),
+             previous_data=data.get("previous_data"),
+             metadata=data.get("metadata") or {},
+         )
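
To round out the event model, a sketch of parsing a change event (payload invented) and reading it through the legacy aliases:

    event = MemoryChangeEvent.from_dict({
        "scope": "session",
        "scope_id": "sess-9",
        "key": "draft",
        "action": "update",
        "data": {"text": "v2"},
        "previous_data": {"text": "v1"},
    })
    assert event.new_value == event.data            # alias for the new field name
    assert event.old_value == event.previous_data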