agentfield 0.1.22rc2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agentfield/__init__.py +66 -0
- agentfield/agent.py +3569 -0
- agentfield/agent_ai.py +1125 -0
- agentfield/agent_cli.py +386 -0
- agentfield/agent_field_handler.py +494 -0
- agentfield/agent_mcp.py +534 -0
- agentfield/agent_registry.py +29 -0
- agentfield/agent_server.py +1185 -0
- agentfield/agent_utils.py +269 -0
- agentfield/agent_workflow.py +323 -0
- agentfield/async_config.py +278 -0
- agentfield/async_execution_manager.py +1227 -0
- agentfield/client.py +1447 -0
- agentfield/connection_manager.py +280 -0
- agentfield/decorators.py +527 -0
- agentfield/did_manager.py +337 -0
- agentfield/dynamic_skills.py +304 -0
- agentfield/execution_context.py +255 -0
- agentfield/execution_state.py +453 -0
- agentfield/http_connection_manager.py +429 -0
- agentfield/litellm_adapters.py +140 -0
- agentfield/logger.py +249 -0
- agentfield/mcp_client.py +204 -0
- agentfield/mcp_manager.py +340 -0
- agentfield/mcp_stdio_bridge.py +550 -0
- agentfield/memory.py +723 -0
- agentfield/memory_events.py +489 -0
- agentfield/multimodal.py +173 -0
- agentfield/multimodal_response.py +403 -0
- agentfield/pydantic_utils.py +227 -0
- agentfield/rate_limiter.py +280 -0
- agentfield/result_cache.py +441 -0
- agentfield/router.py +190 -0
- agentfield/status.py +70 -0
- agentfield/types.py +710 -0
- agentfield/utils.py +26 -0
- agentfield/vc_generator.py +464 -0
- agentfield/vision.py +198 -0
- agentfield-0.1.22rc2.dist-info/METADATA +102 -0
- agentfield-0.1.22rc2.dist-info/RECORD +42 -0
- agentfield-0.1.22rc2.dist-info/WHEEL +5 -0
- agentfield-0.1.22rc2.dist-info/top_level.txt +1 -0
agentfield/utils.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
import socket
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
def get_free_port(start_port=8001, end_port=8999):
    """
    Find an available port in the specified range.

    Each candidate port is probed by attempting a TCP bind on localhost;
    the first port that binds successfully is returned. The probe socket
    is always closed, so the returned port is free (though not reserved)
    at the moment this function returns.

    Args:
        start_port (int): Start of port range (inclusive)
        end_port (int): End of port range (inclusive)

    Returns:
        int: Available port number

    Raises:
        RuntimeError: If no free port found in range
    """
    candidate = start_port
    while candidate <= end_port:
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            probe.bind(("localhost", candidate))
        except OSError:
            # Port in use (or otherwise unbindable) -- try the next one.
            candidate += 1
        else:
            return candidate
        finally:
            probe.close()

    raise RuntimeError(f"No free port found in range {start_port}-{end_port}")
|
|
@@ -0,0 +1,464 @@
|
|
|
1
|
+
"""
|
|
2
|
+
VC Generator for AgentField SDK
|
|
3
|
+
|
|
4
|
+
Handles Verifiable Credentials (VC) generation and verification for agent executions.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

import requests

from .logger import get_logger
from .status import normalize_status
|
|
15
|
+
|
|
16
|
+
logger = get_logger(__name__)
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@dataclass
class ExecutionVC:
    """Represents a verifiable credential for an execution."""

    # Unique identifier assigned to this VC by the AgentField Server.
    vc_id: str
    # Identifiers tying the credential to a specific execution run.
    execution_id: str
    workflow_id: str
    session_id: str
    # DIDs of the parties involved: the VC issuer, the agent that was
    # invoked (target), and the party that initiated the call (caller).
    issuer_did: str
    target_did: str
    caller_did: str
    # VC document payload as returned by the server.
    vc_document: Dict[str, Any]
    # Signature over the VC document, as provided by the server.
    signature: str
    # Server-computed hashes of the serialized input/output payloads.
    input_hash: str
    output_hash: str
    # Execution status string (e.g. "success", "error").
    status: str
    # Creation timestamp, parsed from the server's ISO-8601 string.
    created_at: datetime
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class WorkflowVC:
    """Represents a workflow-level verifiable credential."""

    # Workflow and session this aggregate credential belongs to.
    workflow_id: str
    session_id: str
    # IDs of the per-execution VCs aggregated into this workflow VC.
    component_vcs: List[str]
    # Identifier of the aggregated workflow-level VC itself.
    workflow_vc_id: str
    # Overall workflow status reported by the server.
    status: str
    # Workflow start timestamp; end_time may be None if the server did
    # not report one (e.g. workflow still in progress).
    start_time: datetime
    end_time: Optional[datetime]
    # Step progress counters reported by the server.
    total_steps: int
    completed_steps: int
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
class VCGenerator:
    """
    Generates and manages verifiable credentials for agent executions.

    Handles:
    - Execution VC generation
    - Workflow VC aggregation
    - VC verification
    - Integration with AgentField Server

    All server calls are best-effort: failures are logged and surfaced as
    ``None`` return values rather than raised, so VC generation never
    breaks the execution it is documenting.
    """

    def __init__(self, agentfield_server_url: str, api_key: Optional[str] = None):
        """
        Initialize VC Generator.

        Args:
            agentfield_server_url: URL of the AgentField Server
            api_key: Optional API key for authentication
        """
        # Strip any trailing slash so endpoint paths can be appended safely.
        self.agentfield_server_url = agentfield_server_url.rstrip("/")
        self.api_key = api_key
        # Disabled by default; enable explicitly via set_enabled(True).
        self.enabled = False

    def _get_auth_headers(self) -> Dict[str, str]:
        """Return auth headers if API key is configured."""
        if not self.api_key:
            return {}
        return {"X-API-Key": self.api_key}

    def _json_headers(self) -> Dict[str, str]:
        """Return JSON content-type headers merged with auth headers.

        Shared by every JSON POST in this class so the header logic lives
        in one place.
        """
        headers = {"Content-Type": "application/json"}
        headers.update(self._get_auth_headers())
        return headers

    def set_enabled(self, enabled: bool):
        """Enable or disable VC generation."""
        self.enabled = enabled

    def generate_execution_vc(
        self,
        execution_context: Any,
        input_data: Any,
        output_data: Any,
        status: str,
        error_message: Optional[str] = None,
        duration_ms: int = 0,
    ) -> Optional["ExecutionVC"]:
        """
        Generate a verifiable credential for an execution.

        Args:
            execution_context: ExecutionContext from DIDManager
            input_data: Input data for the execution
            output_data: Output data from the execution
            status: Execution status (success, error, etc.)
            error_message: Error message if execution failed
            duration_ms: Execution duration in milliseconds

        Returns:
            ExecutionVC if successful, None otherwise (disabled, server
            error, or any unexpected failure).
        """
        if not self.enabled:
            return None

        try:
            logger.debug(
                f"Generating VC for execution: {execution_context.execution_id}"
            )

            # Naive timestamps get an explicit "Z" suffix so the server
            # always receives an unambiguous UTC marker.
            vc_data = {
                "execution_context": {
                    "execution_id": execution_context.execution_id,
                    "workflow_id": execution_context.workflow_id,
                    "session_id": execution_context.session_id,
                    "caller_did": execution_context.caller_did,
                    "target_did": execution_context.target_did,
                    "agent_node_did": execution_context.agent_node_did,
                    "timestamp": execution_context.timestamp.isoformat() + "Z"
                    if execution_context.timestamp.tzinfo is None
                    else execution_context.timestamp.isoformat(),
                },
                "input_data": self._serialize_data_for_json(input_data),
                "output_data": self._serialize_data_for_json(output_data),
                "status": normalize_status(status),
                "error_message": error_message,
                "duration_ms": duration_ms,
            }

            response = requests.post(
                f"{self.agentfield_server_url}/api/v1/execution/vc",
                json=vc_data,
                headers=self._json_headers(),
                timeout=10,
            )

            if response.status_code == 200:
                result = response.json()
                logger.debug(
                    f"VC generation successful for execution: {execution_context.execution_id}"
                )
                return self._parse_execution_vc(result)
            else:
                logger.warning(
                    f"Failed to generate execution VC: {response.status_code} - {response.text}"
                )
                return None

        except Exception as e:
            logger.error(f"Error generating execution VC: {e}")
            return None

    def verify_vc(self, vc_document: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        """
        Verify a verifiable credential.

        Args:
            vc_document: VC document to verify

        Returns:
            Verification result if successful, None otherwise
        """
        try:
            verification_data = {"vc_document": vc_document}

            response = requests.post(
                f"{self.agentfield_server_url}/api/v1/did/verify",
                json=verification_data,
                headers=self._json_headers(),
                timeout=10,
            )

            if response.status_code == 200:
                return response.json()
            else:
                logger.warning(
                    f"Failed to verify VC: {response.status_code} - {response.text}"
                )
                return None

        except Exception as e:
            logger.error(f"Error verifying VC: {e}")
            return None

    def get_workflow_vc_chain(self, workflow_id: str) -> Optional[Dict[str, Any]]:
        """
        Get the complete VC chain for a workflow.

        Args:
            workflow_id: Workflow identifier

        Returns:
            Workflow VC chain if successful, None otherwise
        """
        try:
            response = requests.get(
                f"{self.agentfield_server_url}/api/v1/did/workflow/{workflow_id}/vc-chain",
                headers=self._get_auth_headers(),
                timeout=10,
            )

            if response.status_code == 200:
                return response.json()
            else:
                logger.warning(
                    f"Failed to get workflow VC chain: {response.status_code} - {response.text}"
                )
                return None

        except Exception as e:
            logger.error(f"Error getting workflow VC chain: {e}")
            return None

    def create_workflow_vc(
        self, workflow_id: str, session_id: str, execution_vc_ids: List[str]
    ) -> Optional["WorkflowVC"]:
        """
        Create a workflow-level VC that aggregates execution VCs.

        Args:
            workflow_id: Workflow identifier
            session_id: Session identifier
            execution_vc_ids: List of execution VC IDs to aggregate

        Returns:
            WorkflowVC if successful, None otherwise
        """
        try:
            workflow_data = {
                "session_id": session_id,
                "execution_vc_ids": execution_vc_ids,
            }

            response = requests.post(
                f"{self.agentfield_server_url}/api/v1/did/workflow/{workflow_id}/vc",
                json=workflow_data,
                headers=self._json_headers(),
                timeout=10,
            )

            if response.status_code == 200:
                result = response.json()
                return self._parse_workflow_vc(result)
            else:
                logger.warning(
                    f"Failed to create workflow VC: {response.status_code} - {response.text}"
                )
                return None

        except Exception as e:
            logger.error(f"Error creating workflow VC: {e}")
            return None

    def export_vcs(
        self, filters: Optional[Dict[str, Any]] = None
    ) -> Optional[List[Dict[str, Any]]]:
        """
        Export VCs for external verification.

        Args:
            filters: Optional filters for VC export

        Returns:
            List of VCs if successful, None otherwise
        """
        try:
            params = filters or {}

            # Longer timeout than the other calls: exports can be large.
            response = requests.get(
                f"{self.agentfield_server_url}/api/v1/did/export/vcs",
                params=params,
                headers=self._get_auth_headers(),
                timeout=30,
            )

            if response.status_code == 200:
                return response.json()
            else:
                logger.warning(
                    f"Failed to export VCs: {response.status_code} - {response.text}"
                )
                return None

        except Exception as e:
            logger.error(f"Error exporting VCs: {e}")
            return None

    def is_enabled(self) -> bool:
        """Check if VC generation is enabled."""
        return self.enabled

    def _serialize_data(self, data: Any) -> bytes:
        """Serialize data to bytes for VC generation."""
        if data is None:
            return b""

        if isinstance(data, (str, bytes)):
            return data.encode() if isinstance(data, str) else data

        # For complex objects, serialize to JSON; sort_keys keeps the byte
        # representation deterministic for equal inputs.
        try:
            return json.dumps(data, sort_keys=True).encode()
        except Exception:
            return str(data).encode()

    def _serialize_data_for_json(self, data: Any) -> str:
        """Serialize data for JSON transmission as base64-encoded string."""
        import base64

        if data is None:
            return ""

        # Convert data to string first
        if isinstance(data, str):
            data_str = data
        elif isinstance(data, bytes):
            data_str = data.decode("utf-8", errors="replace")
        else:
            # For complex objects, serialize to JSON string
            try:
                data_str = json.dumps(data, sort_keys=True)
            except Exception:
                data_str = str(data)

        # Encode as base64 for transmission to Go server
        return base64.b64encode(data_str.encode("utf-8")).decode("ascii")

    def _parse_execution_vc(self, vc_data: Dict[str, Any]) -> "ExecutionVC":
        """Parse execution VC from API response."""
        return ExecutionVC(
            vc_id=vc_data["vc_id"],
            execution_id=vc_data["execution_id"],
            workflow_id=vc_data["workflow_id"],
            session_id=vc_data["session_id"],
            issuer_did=vc_data["issuer_did"],
            target_did=vc_data["target_did"],
            caller_did=vc_data["caller_did"],
            vc_document=vc_data["vc_document"],
            signature=vc_data["signature"],
            input_hash=vc_data["input_hash"],
            output_hash=vc_data["output_hash"],
            status=vc_data["status"],
            # Server sends ISO-8601 with a "Z" suffix; fromisoformat needs
            # an explicit "+00:00" offset on older Python versions.
            created_at=datetime.fromisoformat(
                vc_data["created_at"].replace("Z", "+00:00")
            ),
        )

    def _parse_workflow_vc(self, vc_data: Dict[str, Any]) -> "WorkflowVC":
        """Parse workflow VC from API response."""
        end_time = None
        if vc_data.get("end_time"):
            end_time = datetime.fromisoformat(
                vc_data["end_time"].replace("Z", "+00:00")
            )

        return WorkflowVC(
            workflow_id=vc_data["workflow_id"],
            session_id=vc_data["session_id"],
            component_vcs=vc_data["component_vcs"],
            workflow_vc_id=vc_data["workflow_vc_id"],
            status=vc_data["status"],
            start_time=datetime.fromisoformat(
                vc_data["start_time"].replace("Z", "+00:00")
            ),
            end_time=end_time,
            total_steps=vc_data["total_steps"],
            completed_steps=vc_data["completed_steps"],
        )
|
|
383
|
+
|
|
384
|
+
|
|
385
|
+
class VCContext:
    """
    Context manager for VC-enabled execution.

    Automatically generates VCs for code blocks when used as a context
    manager. Input/output payloads are supplied by the caller through
    ``set_input_data`` / ``set_output_data`` before the block exits.
    Exceptions raised inside the block are never suppressed.
    """

    def __init__(
        self, vc_generator: "VCGenerator", execution_context: Any, function_name: str
    ):
        """
        Initialize VC context.

        Args:
            vc_generator: VCGenerator instance
            execution_context: ExecutionContext from DIDManager
            function_name: Name of the function being executed
        """
        self.vc_generator = vc_generator
        self.execution_context = execution_context
        self.function_name = function_name
        # Populated on __enter__; used to compute execution duration.
        self.start_time = None
        # Optional payloads supplied via set_input_data / set_output_data.
        self.input_data = None
        self.output_data = None
        self.error_message = None
        # Assume success until an exception reaches __exit__.
        self.status = "success"

    def __enter__(self):
        """Enter the context manager and record the start time."""
        # Timezone-aware UTC; datetime.utcnow() is deprecated (Python 3.12+).
        self.start_time = datetime.now(timezone.utc)
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Exit the context manager and generate VC.

        Returns None (falsy), so exceptions from the managed block
        propagate to the caller unchanged.
        """
        if not self.vc_generator.is_enabled():
            return

        # Calculate duration since __enter__ (0 if __enter__ never ran).
        if self.start_time:
            duration_ms = int(
                (datetime.now(timezone.utc) - self.start_time).total_seconds() * 1000
            )
        else:
            duration_ms = 0

        # Set status based on exception
        if exc_type is not None:
            self.status = "error"
            self.error_message = str(exc_val) if exc_val else "Unknown error"

        # Generate VC -- best-effort: failures are logged, never raised,
        # so VC bookkeeping cannot mask the block's own outcome.
        try:
            vc = self.vc_generator.generate_execution_vc(
                execution_context=self.execution_context,
                input_data=self.input_data,
                output_data=self.output_data,
                status=self.status,
                error_message=self.error_message,
                duration_ms=duration_ms,
            )

            if vc:
                logger.debug(
                    f"Generated VC {vc.vc_id} for execution {self.execution_context.execution_id}"
                )
            else:
                logger.warning(
                    f"Failed to generate VC for execution {self.execution_context.execution_id}"
                )

        except Exception as e:
            logger.error(f"Error in VC context manager: {e}")

    def set_input_data(self, data: Any):
        """Set input data for VC generation."""
        self.input_data = data

    def set_output_data(self, data: Any):
        """Set output data for VC generation."""
        self.output_data = data
|
agentfield/vision.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Image Generation Module
|
|
3
|
+
|
|
4
|
+
Handles image generation across multiple providers (LiteLLM, OpenRouter).
|
|
5
|
+
Keeps provider-specific implementation details separate from the main agent code.
|
|
6
|
+
|
|
7
|
+
Supported Providers:
|
|
8
|
+
- LiteLLM: DALL-E, Azure DALL-E, Bedrock Stable Diffusion, etc.
|
|
9
|
+
- OpenRouter: Gemini image generation, etc.
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from typing import Any, Optional
|
|
13
|
+
from agentfield.logger import log_error
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
async def generate_image_litellm(
    prompt: str,
    model: str,
    size: str,
    quality: str,
    style: Optional[str],
    response_format: str,
    **kwargs,
) -> Any:
    """
    Generate an image through LiteLLM's dedicated image-generation API.

    Backed by LiteLLM's ``aimage_generation()``, which covers OpenAI
    DALL-E (dall-e-3, dall-e-2), Azure DALL-E, AWS Bedrock Stable
    Diffusion, and other LiteLLM-supported image models.

    Args:
        prompt: Text prompt for image generation
        model: Model to use (e.g., "dall-e-3", "azure/dall-e-3")
        size: Image size (e.g., "1024x1024", "1792x1024")
        quality: Image quality ("standard", "hd")
        style: Image style ("vivid", "natural") - DALL-E 3 only
        response_format: Response format ("url", "b64_json")
        **kwargs: Additional LiteLLM parameters; keys here override the
            named parameters above if they collide.

    Returns:
        MultimodalResponse with generated image(s)

    Raises:
        ImportError: If litellm is not installed
        Exception: If image generation fails
    """
    try:
        import litellm
    except ImportError:
        raise ImportError(
            "litellm is not installed. Please install it with `pip install litellm`."
        )

    # Named parameters first, then kwargs, so explicit kwargs win on
    # key collisions (matches dict-literal **kwargs spreading).
    request = {
        "prompt": prompt,
        "model": model,
        "size": size,
        "quality": quality,
        "response_format": response_format,
    }
    request.update(kwargs)

    # "style" is only understood by DALL-E 3.
    if style and "dall-e-3" in model:
        request["style"] = style

    try:
        raw_response = await litellm.aimage_generation(**request)

        # Wrap the provider response into the SDK's multimodal type.
        from agentfield.multimodal_response import detect_multimodal_response

        return detect_multimodal_response(raw_response)

    except Exception as e:
        log_error(f"LiteLLM image generation failed: {e}")
        raise
|
|
84
|
+
|
|
85
|
+
|
|
86
|
+
async def generate_image_openrouter(
    prompt: str,
    model: str,
    size: str,
    quality: str,
    style: Optional[str],
    response_format: str,
    **kwargs,
) -> Any:
    """
    Generate an image through OpenRouter's chat completions API.

    Unlike LiteLLM's dedicated image endpoint, OpenRouter enables image
    output by passing ``modalities`` to the regular chat completions
    call, then returns images on ``choices[0].message.images``.

    Supported models include google/gemini-2.5-flash-image-preview and
    other OpenRouter models with image generation capabilities.

    Args:
        prompt: Text prompt for image generation
        model: OpenRouter model (must start with "openrouter/")
        size: Image size (may not be used by all OpenRouter models)
        quality: Image quality (may not be used by all OpenRouter models)
        style: Image style (may not be used by all OpenRouter models)
        response_format: Response format (may not be used by all OpenRouter models)
        **kwargs: Additional OpenRouter-specific parameters, e.g.
            image_config={"aspect_ratio": "16:9"}

    Returns:
        MultimodalResponse with generated image(s)

    Raises:
        ImportError: If litellm is not installed
        Exception: If image generation fails
    """
    try:
        import litellm
    except ImportError:
        raise ImportError(
            "litellm is not installed. Please install it with `pip install litellm`."
        )

    from agentfield.multimodal_response import ImageOutput, MultimodalResponse

    def _image_url_of(entry: Any) -> Optional[str]:
        """Pull the URL out of one response image entry.

        Entries arrive either as objects with an ``image_url`` attribute
        or as dicts shaped {"type": "image_url", "image_url": {"url": ...}}.
        """
        if hasattr(entry, "image_url"):
            inner = entry.image_url
            return inner.url if hasattr(inner, "url") else None
        if isinstance(entry, dict) and "image_url" in entry:
            return entry["image_url"].get("url")
        return None

    # OpenRouter uses chat completions with a modalities parameter;
    # kwargs are spread last so callers can override or extend.
    params = {
        "model": model,
        "messages": [{"role": "user", "content": prompt}],
        "modalities": ["image", "text"],
        **kwargs,
    }

    try:
        response = await litellm.acompletion(**params)

        caption = ""
        outputs = []
        if hasattr(response, "choices") and len(response.choices) > 0:
            message = response.choices[0].message

            if hasattr(message, "content") and message.content:
                caption = message.content

            if hasattr(message, "images") and message.images:
                for entry in message.images:
                    url = _image_url_of(entry)
                    if url:
                        outputs.append(
                            ImageOutput(url=url, b64_json=None, revised_prompt=None)
                        )

        # Fall back to the prompt when the model returned no text.
        return MultimodalResponse(
            text=caption or prompt,
            audio=None,
            images=outputs,
            files=[],
            raw_response=response,
        )

    except Exception as e:
        log_error(f"OpenRouter image generation failed: {e}")
        raise
|