agentfield-0.1.22rc2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentfield/__init__.py +66 -0
- agentfield/agent.py +3569 -0
- agentfield/agent_ai.py +1125 -0
- agentfield/agent_cli.py +386 -0
- agentfield/agent_field_handler.py +494 -0
- agentfield/agent_mcp.py +534 -0
- agentfield/agent_registry.py +29 -0
- agentfield/agent_server.py +1185 -0
- agentfield/agent_utils.py +269 -0
- agentfield/agent_workflow.py +323 -0
- agentfield/async_config.py +278 -0
- agentfield/async_execution_manager.py +1227 -0
- agentfield/client.py +1447 -0
- agentfield/connection_manager.py +280 -0
- agentfield/decorators.py +527 -0
- agentfield/did_manager.py +337 -0
- agentfield/dynamic_skills.py +304 -0
- agentfield/execution_context.py +255 -0
- agentfield/execution_state.py +453 -0
- agentfield/http_connection_manager.py +429 -0
- agentfield/litellm_adapters.py +140 -0
- agentfield/logger.py +249 -0
- agentfield/mcp_client.py +204 -0
- agentfield/mcp_manager.py +340 -0
- agentfield/mcp_stdio_bridge.py +550 -0
- agentfield/memory.py +723 -0
- agentfield/memory_events.py +489 -0
- agentfield/multimodal.py +173 -0
- agentfield/multimodal_response.py +403 -0
- agentfield/pydantic_utils.py +227 -0
- agentfield/rate_limiter.py +280 -0
- agentfield/result_cache.py +441 -0
- agentfield/router.py +190 -0
- agentfield/status.py +70 -0
- agentfield/types.py +710 -0
- agentfield/utils.py +26 -0
- agentfield/vc_generator.py +464 -0
- agentfield/vision.py +198 -0
- agentfield-0.1.22rc2.dist-info/METADATA +102 -0
- agentfield-0.1.22rc2.dist-info/RECORD +42 -0
- agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
- agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
agentfield/multimodal_response.py
@@ -0,0 +1,403 @@
"""
Multimodal response classes for handling LiteLLM multimodal outputs.
Provides seamless integration with audio, image, and file outputs while maintaining backward compatibility.
"""

import base64
import json
import os
import tempfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

from agentfield.logger import log_error, log_warn
from pydantic import BaseModel, Field


class AudioOutput(BaseModel):
    """Represents audio output from LLM with convenient access methods."""

    data: Optional[str] = Field(None, description="Base64-encoded audio data")
    format: str = Field("wav", description="Audio format (wav, mp3, etc.)")
    url: Optional[str] = Field(None, description="URL to audio file if available")

    def save(self, path: Union[str, Path]) -> None:
        """Save audio to file."""
        if not self.data:
            raise ValueError("No audio data available to save")

        path = Path(path)
        path.parent.mkdir(parents=True, exist_ok=True)

        # Decode base64 audio data
        audio_bytes = base64.b64decode(self.data)

        with open(path, "wb") as f:
            f.write(audio_bytes)

    def get_bytes(self) -> bytes:
        """Get raw audio bytes."""
        if not self.data:
            raise ValueError("No audio data available")
        return base64.b64decode(self.data)

    def play(self) -> None:
        """Play audio if possible (requires system audio support)."""
        try:
            import pygame  # type: ignore

            pygame.mixer.init()

            # Create temporary file
            with tempfile.NamedTemporaryFile(
                suffix=f".{self.format}", delete=False
            ) as tmp:
                tmp.write(self.get_bytes())
                tmp_path = tmp.name

            pygame.mixer.music.load(tmp_path)
            pygame.mixer.music.play()

            # Clean up temp file after a delay
            import threading
            import time

            def cleanup():
                time.sleep(5)  # Wait for playback
                try:
                    os.unlink(tmp_path)
                except Exception:
                    pass

            threading.Thread(target=cleanup, daemon=True).start()

        except ImportError:
            log_warn("Audio playback requires pygame: pip install pygame")
        except Exception as e:
            log_error(f"Could not play audio: {e}")

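A minimal sketch of how `AudioOutput` might be used once a base64 payload is in hand; the payload below is fabricated for illustration and is not a playable WAV file.

```python
import base64

from agentfield.multimodal_response import AudioOutput

# Fabricated placeholder payload; a real one would come from an audio-capable
# completion (e.g. the GPT-4o-audio-preview pattern handled further below).
fake_wav_bytes = b"RIFF....WAVEfmt "
audio = AudioOutput(data=base64.b64encode(fake_wav_bytes).decode(), format="wav")

audio.save("out/reply.wav")   # decodes the base64 payload and writes it to disk
raw = audio.get_bytes()       # same bytes, without touching the filesystem
# audio.play()                # optional; requires pygame to be installed
print(len(raw), "bytes saved to out/reply.wav")
```
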
class ImageOutput(BaseModel):
    """Represents image output from LLM with convenient access methods."""

    url: Optional[str] = Field(None, description="URL to image")
    b64_json: Optional[str] = Field(None, description="Base64-encoded image data")
    revised_prompt: Optional[str] = Field(
        None, description="Revised prompt used for generation"
    )

    def save(self, path: Union[str, Path]) -> None:
        """Save image to file."""
        path = Path(path)
        path.parent.mkdir(parents=True, exist_ok=True)

        if self.b64_json:
            # Save from base64 data
            image_bytes = base64.b64decode(self.b64_json)
            with open(path, "wb") as f:
                f.write(image_bytes)
        elif self.url:
            # Download from URL
            try:
                import requests

                response = requests.get(self.url)
                response.raise_for_status()
                with open(path, "wb") as f:
                    f.write(response.content)
            except ImportError:
                raise ImportError(
                    "URL download requires requests: pip install requests"
                )
        else:
            raise ValueError("No image data or URL available to save")

    def get_bytes(self) -> bytes:
        """Get raw image bytes."""
        if self.b64_json:
            return base64.b64decode(self.b64_json)
        elif self.url:
            try:
                import requests

                response = requests.get(self.url)
                response.raise_for_status()
                return response.content
            except ImportError:
                raise ImportError(
                    "URL download requires requests: pip install requests"
                )
        else:
            raise ValueError("No image data or URL available")

    def show(self) -> None:
        """Display image if possible (requires PIL/Pillow)."""
        try:
            from PIL import Image  # type: ignore
            import io

            image_bytes = self.get_bytes()
            image = Image.open(io.BytesIO(image_bytes))
            image.show()
        except ImportError:
            log_warn("Image display requires Pillow: pip install Pillow")
        except Exception as e:
            log_error(f"Could not display image: {e}")

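Similarly, a hedged sketch of persisting an `ImageOutput`; the base64 bytes and prompt below are placeholders rather than output from a real image_generation call.

```python
import base64

from agentfield.multimodal_response import ImageOutput

# Hypothetical single item of an image_generation response, using inline data;
# real responses may carry a URL instead of b64_json.
image = ImageOutput(
    b64_json=base64.b64encode(b"\x89PNG\r\n\x1a\n").decode(),  # placeholder bytes
    revised_prompt="a studio photo of a cat",
)

image.save("out/cat.png")   # decodes b64_json; with only a URL it downloads via requests
# image.show()              # optional preview; requires Pillow and real image data
```
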
class FileOutput(BaseModel):
    """Represents generic file output from LLM."""

    url: Optional[str] = Field(None, description="URL to file")
    data: Optional[str] = Field(None, description="Base64-encoded file data")
    mime_type: Optional[str] = Field(None, description="MIME type of file")
    filename: Optional[str] = Field(None, description="Suggested filename")

    def save(self, path: Union[str, Path]) -> None:
        """Save file to disk."""
        path = Path(path)
        path.parent.mkdir(parents=True, exist_ok=True)

        if self.data:
            # Save from base64 data
            file_bytes = base64.b64decode(self.data)
            with open(path, "wb") as f:
                f.write(file_bytes)
        elif self.url:
            # Download from URL
            try:
                import requests

                response = requests.get(self.url)
                response.raise_for_status()
                with open(path, "wb") as f:
                    f.write(response.content)
            except ImportError:
                raise ImportError(
                    "URL download requires requests: pip install requests"
                )
        else:
            raise ValueError("No file data or URL available to save")

    def get_bytes(self) -> bytes:
        """Get raw file bytes."""
        if self.data:
            return base64.b64decode(self.data)
        elif self.url:
            try:
                import requests

                response = requests.get(self.url)
                response.raise_for_status()
                return response.content
            except ImportError:
                raise ImportError(
                    "URL download requires requests: pip install requests"
                )
        else:
            raise ValueError("No file data or URL available")

class MultimodalResponse:
    """
    Enhanced response object that provides seamless access to multimodal content
    while maintaining backward compatibility with string responses.
    """

    def __init__(
        self,
        text: str = "",
        audio: Optional[AudioOutput] = None,
        images: Optional[List[ImageOutput]] = None,
        files: Optional[List[FileOutput]] = None,
        raw_response: Optional[Any] = None,
    ):
        self._text = text
        self._audio = audio
        self._images = images or []
        self._files = files or []
        self._raw_response = raw_response

    def __str__(self) -> str:
        """Backward compatibility: return text content when used as string."""
        return self._text

    def __repr__(self) -> str:
        """Developer-friendly representation."""
        parts = [f"text='{self._text[:50]}{'...' if len(self._text) > 50 else ''}'"]
        if self._audio:
            parts.append(f"audio={self._audio.format}")
        if self._images:
            parts.append(f"images={len(self._images)}")
        if self._files:
            parts.append(f"files={len(self._files)}")
        return f"MultimodalResponse({', '.join(parts)})"

    @property
    def text(self) -> str:
        """Get text content."""
        return self._text

    @property
    def audio(self) -> Optional[AudioOutput]:
        """Get audio output if available."""
        return self._audio

    @property
    def images(self) -> List[ImageOutput]:
        """Get list of image outputs."""
        return self._images

    @property
    def files(self) -> List[FileOutput]:
        """Get list of file outputs."""
        return self._files

    @property
    def has_audio(self) -> bool:
        """Check if response contains audio."""
        return self._audio is not None

    @property
    def has_images(self) -> bool:
        """Check if response contains images."""
        return len(self._images) > 0

    @property
    def has_files(self) -> bool:
        """Check if response contains files."""
        return len(self._files) > 0

    @property
    def is_multimodal(self) -> bool:
        """Check if response contains any multimodal content."""
        return self.has_audio or self.has_images or self.has_files

    @property
    def raw_response(self) -> Optional[Any]:
        """Get the raw LiteLLM response object."""
        return self._raw_response

    def save_all(
        self, directory: Union[str, Path], prefix: str = "output"
    ) -> Dict[str, str]:
        """
        Save all multimodal content to a directory.
        Returns a dict mapping content type to saved file paths.
        """
        directory = Path(directory)
        directory.mkdir(parents=True, exist_ok=True)
        saved_files = {}

        # Save audio
        if self._audio:
            audio_path = directory / f"{prefix}_audio.{self._audio.format}"
            self._audio.save(audio_path)
            saved_files["audio"] = str(audio_path)

        # Save images
        for i, image in enumerate(self._images):
            # Determine extension from URL or default to png
            ext = "png"
            if image.url:
                ext = Path(image.url).suffix.lstrip(".") or "png"

            image_path = directory / f"{prefix}_image_{i}.{ext}"
            image.save(image_path)
            saved_files[f"image_{i}"] = str(image_path)

        # Save files
        for i, file in enumerate(self._files):
            filename = file.filename or f"{prefix}_file_{i}"
            file_path = directory / filename
            file.save(file_path)
            saved_files[f"file_{i}"] = str(file_path)

        # Save text content
        if self._text:
            text_path = directory / f"{prefix}_text.txt"
            with open(text_path, "w", encoding="utf-8") as f:
                f.write(self._text)
            saved_files["text"] = str(text_path)

        return saved_files

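A brief illustration of the backward-compatibility contract described in the class docstring: legacy callers keep treating the response as a string, while multimodal callers inspect the extra accessors. All values below are constructed by hand for the example.

```python
import base64

from agentfield.multimodal_response import AudioOutput, MultimodalResponse

response = MultimodalResponse(
    text="Here is your summary.",
    audio=AudioOutput(data=base64.b64encode(b"\x00\x01").decode(), format="wav"),
)

print(str(response))              # legacy string behaviour: "Here is your summary."
print(response.is_multimodal)     # True
print(response.has_audio, response.has_images, response.has_files)

# Persist everything (audio + text here) under ./out with a common prefix.
saved = response.save_all("out", prefix="demo")
print(saved)  # e.g. {"audio": "out/demo_audio.wav", "text": "out/demo_text.txt"}
```
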
def detect_multimodal_response(response: Any) -> MultimodalResponse:
    """
    Automatically detect and wrap multimodal content from LiteLLM responses.

    Args:
        response: Raw response from LiteLLM (completion or image_generation)

    Returns:
        MultimodalResponse with detected content
    """
    text = ""
    audio = None
    images = []
    files = []

    # Handle completion responses (text + potential audio)
    if hasattr(response, "choices") and response.choices:
        choice = response.choices[0]
        message = choice.message

        # Extract text content
        if hasattr(message, "content") and message.content:
            text = message.content

        # Extract audio content (GPT-4o-audio-preview pattern)
        if hasattr(message, "audio") and message.audio:
            audio_data = getattr(message.audio, "data", None)
            if audio_data:
                audio = AudioOutput(
                    data=audio_data,
                    format="wav",  # Default format, could be detected from response
                    url=None,
                )

    # Handle image generation responses
    elif hasattr(response, "data") and response.data:
        # This is likely an image generation response
        for item in response.data:
            if hasattr(item, "url") or hasattr(item, "b64_json"):
                image = ImageOutput(
                    url=getattr(item, "url", None),
                    b64_json=getattr(item, "b64_json", None),
                    revised_prompt=getattr(item, "revised_prompt", None),
                )
                images.append(image)

    # Handle direct string responses
    elif isinstance(response, str):
        text = response

    # Handle TTS audio responses (from our _generate_tts_audio method)
    elif hasattr(response, "audio_data") and hasattr(response, "text"):
        text = response.text
        # Create AudioOutput from TTS response
        audio = AudioOutput(
            data=response.audio_data,
            format=getattr(response, "format", "wav"),
            url=None,
        )

    # Handle schema responses (Pydantic models)
    elif hasattr(response, "model_dump") or hasattr(response, "dict"):
        # This is a Pydantic model, convert to string representation
        try:
            if hasattr(response, "model_dump"):
                text = json.dumps(response.model_dump(), indent=2)
            else:
                text = json.dumps(response.dict(), indent=2)
        except Exception:
            text = str(response)

    # Fallback to string conversion
    else:
        text = str(response)

    return MultimodalResponse(
        text=text, audio=audio, images=images, files=files, raw_response=response
    )
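
The detector is written against the shape of LiteLLM responses rather than any concrete class, so any object exposing `.choices[0].message` (or `.data` for image generation) gets wrapped. A self-contained sketch using stand-in objects:

```python
from types import SimpleNamespace

from agentfield.multimodal_response import detect_multimodal_response

# Stand-in for a chat completion: response.choices[0].message.content
fake_completion = SimpleNamespace(
    choices=[SimpleNamespace(message=SimpleNamespace(content="Hello!", audio=None))]
)

wrapped = detect_multimodal_response(fake_completion)
print(wrapped.text)            # "Hello!"
print(wrapped.is_multimodal)   # False

# Plain strings (and unknown objects) fall through to string conversion.
print(detect_multimodal_response("already a string").text)
```
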
agentfield/pydantic_utils.py
@@ -0,0 +1,227 @@
"""
Utility functions for automatic Pydantic model conversion in AgentField SDK.
Provides FastAPI-like automatic conversion of dictionary arguments to Pydantic model instances.
"""

import inspect
from typing import Any, Tuple, Union, get_args, get_origin, get_type_hints

from agentfield.logger import log_warn
from pydantic import BaseModel, ValidationError


def is_pydantic_model(type_hint: Any) -> bool:
    """
    Check if a type hint represents a Pydantic model.

    Args:
        type_hint: The type hint to check

    Returns:
        True if the type hint is a Pydantic model class
    """
    try:
        return inspect.isclass(type_hint) and issubclass(type_hint, BaseModel)
    except TypeError:
        return False


def is_optional_type(type_hint: Any) -> bool:
    """
    Check if a type hint represents an Optional type (Union[T, None]).

    Args:
        type_hint: The type hint to check

    Returns:
        True if the type hint is Optional[T]
    """
    origin = get_origin(type_hint)
    if origin is Union:
        args = get_args(type_hint)
        return len(args) == 2 and type(None) in args
    return False


def get_optional_inner_type(type_hint: Any) -> Any:
    """
    Extract the inner type from an Optional[T] type hint.

    Args:
        type_hint: The Optional type hint

    Returns:
        The inner type T from Optional[T]
    """
    if is_optional_type(type_hint):
        args = get_args(type_hint)
        return args[0] if args[0] is not type(None) else args[1]
    return type_hint

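For orientation, a quick sketch of what the three introspection helpers report for a typical annotation; `Item` is a made-up model used only for this example.

```python
from typing import Optional

from pydantic import BaseModel

from agentfield.pydantic_utils import (
    get_optional_inner_type,
    is_optional_type,
    is_pydantic_model,
)

class Item(BaseModel):  # hypothetical model for the example
    name: str

print(is_pydantic_model(Item))                  # True
print(is_pydantic_model(dict))                  # False
print(is_optional_type(Optional[Item]))         # True
print(get_optional_inner_type(Optional[Item]))  # <class '...Item'>
```
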
def convert_dict_to_model(data: Any, model_class: type) -> Any:
    """
    Convert a dictionary to a Pydantic model instance.

    Args:
        data: The data to convert (usually a dict)
        model_class: The Pydantic model class to convert to

    Returns:
        The converted Pydantic model instance, or the original data if conversion fails

    Raises:
        ValidationError: If the data doesn't match the model schema
    """
    if not isinstance(data, dict):
        # If it's already the correct type or not a dict, return as-is
        return data

    if not is_pydantic_model(model_class):
        # Not a Pydantic model, return original data
        return data

    try:
        return model_class(**data)
    except ValidationError as e:
        # Log context, then re-raise the original error; Pydantic's
        # ValidationError cannot be reconstructed from a plain message string.
        log_warn(f"Failed to convert dictionary to {model_class.__name__}: {e}")
        raise
    except Exception as e:
        # For any other errors, provide helpful context
        raise ValueError(
            f"Unexpected error converting dictionary to {model_class.__name__}: {e}"
        ) from e

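A small usage sketch, assuming a hypothetical `Item` model: conversion is a thin wrapper around the model constructor, and non-dict values or non-model targets pass through untouched.

```python
from pydantic import BaseModel

from agentfield.pydantic_utils import convert_dict_to_model

class Item(BaseModel):  # hypothetical model for the example
    name: str
    quantity: int = 1

item = convert_dict_to_model({"name": "bolt", "quantity": 3}, Item)
print(type(item).__name__, item.quantity)         # Item 3

print(convert_dict_to_model("not a dict", Item))   # returned unchanged
print(convert_dict_to_model({"x": 1}, dict))       # target isn't a model: unchanged
```
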
def convert_function_args(
    func: callable, args: tuple, kwargs: dict
) -> Tuple[tuple, dict]:
    """
    Convert function arguments to Pydantic models based on the function's type hints.
    This mimics FastAPI's automatic request body parsing behavior.

    Args:
        func: The function whose arguments should be converted
        args: Positional arguments passed to the function
        kwargs: Keyword arguments passed to the function

    Returns:
        Tuple of (converted_args, converted_kwargs)

    Raises:
        ValidationError: If any argument fails Pydantic validation
    """
    try:
        # Get function signature and type hints
        sig = inspect.signature(func)
        type_hints = get_type_hints(func)

        # Convert args to kwargs for easier processing
        bound_args = sig.bind_partial(*args, **kwargs)
        bound_args.apply_defaults()

        converted_kwargs = {}

        for param_name, value in bound_args.arguments.items():
            # Skip special parameters
            if param_name in ["self", "execution_context"]:
                converted_kwargs[param_name] = value
                continue

            # Get the type hint for this parameter
            type_hint = type_hints.get(param_name)
            if type_hint is None:
                # No type hint, keep original value
                converted_kwargs[param_name] = value
                continue

            # Handle Optional types
            actual_type = type_hint
            if is_optional_type(type_hint):
                if value is None:
                    converted_kwargs[param_name] = None
                    continue
                actual_type = get_optional_inner_type(type_hint)

            # Convert if it's a Pydantic model
            if is_pydantic_model(actual_type):
                try:
                    converted_kwargs[param_name] = convert_dict_to_model(
                        value, actual_type
                    )
                except ValidationError as e:
                    # Log parameter context, then re-raise the original error;
                    # ValidationError cannot be rebuilt from a plain message.
                    log_warn(f"Validation error for parameter '{param_name}': {e}")
                    raise
            else:
                # Not a Pydantic model, keep original value
                converted_kwargs[param_name] = value

        # Convert back to args and kwargs based on original call pattern
        final_args = []
        final_kwargs = {}

        param_names = list(sig.parameters.keys())

        # Rebuild args for positional parameters
        for i, param_name in enumerate(param_names[: len(args)]):
            if param_name in converted_kwargs:
                final_args.append(converted_kwargs[param_name])
                del converted_kwargs[param_name]

        # Remaining parameters go to kwargs
        final_kwargs.update(converted_kwargs)

        return tuple(final_args), final_kwargs

    except Exception as e:
        # If conversion fails completely, return original args
        # This ensures backward compatibility
        if isinstance(e, ValidationError):
            raise  # Re-raise validation errors

        # For other errors, log and return original
        log_warn(f"Failed to convert arguments for {func.__name__}: {e}")
        return args, kwargs

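A sketch of the FastAPI-like behaviour this function provides: a dictionary passed where a model is annotated comes back as a model instance, while other arguments are left alone. `Item` and `create_item` are illustrative names, not part of the SDK.

```python
from pydantic import BaseModel

from agentfield.pydantic_utils import convert_function_args

class Item(BaseModel):  # hypothetical model for the example
    name: str
    quantity: int = 1

def create_item(item: Item, note: str = "") -> str:  # hypothetical handler
    return f"{item.name} x{item.quantity} {note}".strip()

args, kwargs = convert_function_args(
    create_item, ({"name": "bolt", "quantity": 2},), {"note": "urgent"}
)
print(type(args[0]).__name__)         # Item
print(create_item(*args, **kwargs))   # bolt x2 urgent
```
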
def should_convert_args(func: callable) -> bool:
    """
    Determine if a function's arguments should be automatically converted.

    Args:
        func: The function to check

    Returns:
        True if the function has Pydantic model parameters that could benefit from conversion
    """
    try:
        type_hints = get_type_hints(func)
        sig = inspect.signature(func)

        for param_name, param in sig.parameters.items():
            if param_name in ["self", "execution_context"]:
                continue

            type_hint = type_hints.get(param_name)
            if type_hint is None:
                continue

            # Check if it's a Pydantic model or Optional Pydantic model
            actual_type = type_hint
            if is_optional_type(type_hint):
                actual_type = get_optional_inner_type(type_hint)

            if is_pydantic_model(actual_type):
                return True

        return False

    except Exception:
        # If we can't determine, err on the side of not converting
        return False
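
Taken together, `should_convert_args` can act as a cheap gate so that `convert_function_args` only runs for handlers that actually declare Pydantic parameters. The decorator below is an illustrative sketch, not part of the published module.

```python
import functools

from pydantic import BaseModel

from agentfield.pydantic_utils import convert_function_args, should_convert_args

def auto_convert(func):
    """Hypothetical decorator: convert dict arguments into declared models."""
    if not should_convert_args(func):
        return func  # no Pydantic parameters, nothing to do

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        args, kwargs = convert_function_args(func, args, kwargs)
        return func(*args, **kwargs)

    return wrapper

class Query(BaseModel):  # hypothetical model for the example
    term: str
    limit: int = 10

@auto_convert
def search(query: Query) -> str:
    return f"searching '{query.term}' (limit={query.limit})"

print(search({"term": "agents"}))   # searching 'agents' (limit=10)
```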