agent-framework-devui 1.0.0b251001__py3-none-any.whl → 1.0.0b251016__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of agent-framework-devui might be problematic.
- agent_framework_devui/_conversations.py +473 -0
- agent_framework_devui/_discovery.py +323 -255
- agent_framework_devui/_executor.py +146 -281
- agent_framework_devui/_mapper.py +307 -128
- agent_framework_devui/_server.py +232 -192
- agent_framework_devui/_session.py +3 -3
- agent_framework_devui/_utils.py +548 -0
- agent_framework_devui/models/__init__.py +15 -10
- agent_framework_devui/models/_discovery_models.py +8 -2
- agent_framework_devui/models/_openai_custom.py +45 -90
- agent_framework_devui/ui/agentframework.svg +33 -0
- agent_framework_devui/ui/assets/index-CE4pGoXh.css +1 -0
- agent_framework_devui/ui/assets/index-DmL7WSFa.js +577 -0
- agent_framework_devui/ui/index.html +3 -3
- agent_framework_devui-1.0.0b251016.dist-info/METADATA +286 -0
- agent_framework_devui-1.0.0b251016.dist-info/RECORD +23 -0
- agent_framework_devui/ui/assets/index-D1AmQWga.css +0 -1
- agent_framework_devui/ui/assets/index-DPEaaIdK.js +0 -435
- agent_framework_devui-1.0.0b251001.dist-info/METADATA +0 -172
- agent_framework_devui-1.0.0b251001.dist-info/RECORD +0 -20
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/WHEEL +0 -0
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/entry_points.txt +0 -0
- {agent_framework_devui-1.0.0b251001.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/licenses/LICENSE +0 -0
agent_framework_devui/_executor.py

@@ -5,12 +5,12 @@
 import json
 import logging
 import os
-import uuid
 from collections.abc import AsyncGenerator
 from typing import Any

-from agent_framework import
+from agent_framework import AgentProtocol

+from ._conversations import ConversationStore, InMemoryConversationStore
 from ._discovery import EntityDiscovery
 from ._mapper import MessageMapper
 from ._tracing import capture_traces
@@ -29,21 +29,26 @@ class EntityNotFoundError(Exception):
 class AgentFrameworkExecutor:
     """Executor for Agent Framework entities - agents and workflows."""

-    def __init__(
+    def __init__(
+        self,
+        entity_discovery: EntityDiscovery,
+        message_mapper: MessageMapper,
+        conversation_store: ConversationStore | None = None,
+    ):
         """Initialize Agent Framework executor.

         Args:
             entity_discovery: Entity discovery instance
             message_mapper: Message mapper instance
+            conversation_store: Optional conversation store (defaults to in-memory)
         """
         self.entity_discovery = entity_discovery
         self.message_mapper = message_mapper
         self._setup_tracing_provider()
         self._setup_agent_framework_tracing()

-        #
-        self.
-        self.agent_threads: dict[str, list[str]] = {}  # agent_id -> thread_ids
+        # Use provided conversation store or default to in-memory
+        self.conversation_store = conversation_store or InMemoryConversationStore()

     def _setup_tracing_provider(self) -> None:
         """Set up our own TracerProvider so we can add processors."""
@@ -83,181 +88,6 @@ class AgentFrameworkExecutor:
         else:
             logger.debug("ENABLE_OTEL not set, skipping observability setup")

-    # Thread Management Methods
-    def create_thread(self, agent_id: str) -> str:
-        """Create new thread for agent."""
-        thread_id = f"thread_{uuid.uuid4().hex[:8]}"
-        thread = AgentThread()
-
-        self.thread_storage[thread_id] = thread
-
-        if agent_id not in self.agent_threads:
-            self.agent_threads[agent_id] = []
-        self.agent_threads[agent_id].append(thread_id)
-
-        return thread_id
-
-    def get_thread(self, thread_id: str) -> AgentThread | None:
-        """Get AgentThread by ID."""
-        return self.thread_storage.get(thread_id)
-
-    def list_threads_for_agent(self, agent_id: str) -> list[str]:
-        """List thread IDs for agent."""
-        return self.agent_threads.get(agent_id, [])
-
-    def get_agent_for_thread(self, thread_id: str) -> str | None:
-        """Find which agent owns this thread."""
-        for agent_id, thread_ids in self.agent_threads.items():
-            if thread_id in thread_ids:
-                return agent_id
-        return None
-
-    def delete_thread(self, thread_id: str) -> bool:
-        """Delete thread."""
-        if thread_id not in self.thread_storage:
-            return False
-
-        for _agent_id, thread_ids in self.agent_threads.items():
-            if thread_id in thread_ids:
-                thread_ids.remove(thread_id)
-                break
-
-        del self.thread_storage[thread_id]
-        return True
-
-    async def get_thread_messages(self, thread_id: str) -> list[dict[str, Any]]:
-        """Get messages from a thread's message store, preserving all content types for UI display."""
-        thread = self.get_thread(thread_id)
-        if not thread or not thread.message_store:
-            return []
-
-        try:
-            # Get AgentFramework ChatMessage objects from thread
-            af_messages = await thread.message_store.list_messages()
-
-            ui_messages = []
-            for i, af_msg in enumerate(af_messages):
-                # Extract role value (handle enum)
-                role = af_msg.role.value if hasattr(af_msg.role, "value") else str(af_msg.role)
-
-                # Skip tool/function messages - only show user and assistant messages
-                if role not in ["user", "assistant"]:
-                    continue
-
-                # Extract all user-facing content (text, images, files, etc.)
-                display_contents = self._extract_display_contents(af_msg.contents)
-
-                # Skip messages with no displayable content
-                if not display_contents:
-                    continue
-
-                ui_message = {
-                    "id": af_msg.message_id or f"restored-{i}",
-                    "role": role,
-                    "contents": display_contents,
-                    "timestamp": __import__("datetime").datetime.now().isoformat(),
-                    "author_name": af_msg.author_name,
-                    "message_id": af_msg.message_id,
-                }
-
-                ui_messages.append(ui_message)
-
-            logger.info(f"Restored {len(ui_messages)} display messages for thread {thread_id}")
-            return ui_messages
-
-        except Exception as e:
-            logger.error(f"Error getting thread messages: {e}")
-            import traceback
-
-            logger.error(traceback.format_exc())
-            return []
-
-    def _extract_display_contents(self, contents: list[Any]) -> list[dict[str, Any]]:
-        """Extract all user-facing content (text, images, files, etc.) from message contents.
-
-        Filters out internal mechanics like function calls/results while preserving
-        all content types that should be displayed in the UI.
-        """
-        display_contents = []
-
-        for content in contents:
-            content_type = getattr(content, "type", None)
-
-            # Text content
-            if content_type == "text":
-                text = getattr(content, "text", "")
-
-                # Handle double-encoded JSON from user messages
-                if text.startswith('{"role":'):
-                    try:
-                        import json
-
-                        parsed = json.loads(text)
-                        if parsed.get("contents"):
-                            for sub_content in parsed["contents"]:
-                                if sub_content.get("type") == "text":
-                                    display_contents.append({"type": "text", "text": sub_content.get("text", "")})
-                    except Exception:
-                        display_contents.append({"type": "text", "text": text})
-                else:
-                    display_contents.append({"type": "text", "text": text})
-
-            # Data content (images, files, PDFs, etc.)
-            elif content_type == "data":
-                display_contents.append({
-                    "type": "data",
-                    "uri": getattr(content, "uri", ""),
-                    "media_type": getattr(content, "media_type", None),
-                })
-
-            # URI content (external links to images/files)
-            elif content_type == "uri":
-                display_contents.append({
-                    "type": "uri",
-                    "uri": getattr(content, "uri", ""),
-                    "media_type": getattr(content, "media_type", None),
-                })
-
-            # Skip function_call, function_result, and other internal content types
-
-        return display_contents
-
-    async def serialize_thread(self, thread_id: str) -> dict[str, Any] | None:
-        """Serialize thread state for persistence."""
-        thread = self.get_thread(thread_id)
-        if not thread:
-            return None
-
-        try:
-            # Use AgentThread's built-in serialization
-            serialized_state = await thread.serialize()
-
-            # Add our metadata
-            agent_id = self.get_agent_for_thread(thread_id)
-            serialized_state["metadata"] = {"agent_id": agent_id, "thread_id": thread_id}
-
-            return serialized_state
-
-        except Exception as e:
-            logger.error(f"Error serializing thread {thread_id}: {e}")
-            return None
-
-    async def deserialize_thread(self, thread_id: str, agent_id: str, serialized_state: dict[str, Any]) -> bool:
-        """Deserialize thread state from persistence."""
-        try:
-            thread = await AgentThread.deserialize(serialized_state)
-            # Store the restored thread
-            self.thread_storage[thread_id] = thread
-            if agent_id not in self.agent_threads:
-                self.agent_threads[agent_id] = []
-            self.agent_threads[agent_id].append(thread_id)
-
-            return True
-
-        except Exception as e:
-            logger.error(f"Error deserializing thread {thread_id}: {e}")
-            return False
-
     async def discover_entities(self) -> list[EntityInfo]:
         """Discover all available entities.

@@ -339,9 +169,11 @@ class AgentFrameworkExecutor:
             Raw Agent Framework events and trace events
         """
         try:
-            # Get entity info
+            # Get entity info
             entity_info = self.get_entity_info(entity_id)
-
+
+            # Trigger lazy loading (will return from cache if already loaded)
+            entity_obj = await self.entity_discovery.load_entity(entity_id)

             if not entity_obj:
                 raise EntityNotFoundError(f"Entity object for '{entity_id}' not found")
@@ -372,7 +204,7 @@ class AgentFrameworkExecutor:
             yield {"type": "error", "message": str(e), "entity_id": entity_id}

     async def _execute_agent(
-        self, agent:
+        self, agent: AgentProtocol, request: AgentFrameworkRequest, trace_collector: Any
     ) -> AsyncGenerator[Any, None]:
         """Execute Agent Framework agent with trace collection and optional thread support.

@@ -388,34 +220,51 @@ class AgentFrameworkExecutor:
             # Convert input to proper ChatMessage or string
             user_message = self._convert_input_to_chat_message(request.input)

-            # Get thread
+            # Get thread from conversation parameter (OpenAI standard!)
             thread = None
-
-
-            thread = self.get_thread(
+            conversation_id = request.get_conversation_id()
+            if conversation_id:
+                thread = self.conversation_store.get_thread(conversation_id)
                 if thread:
-                    logger.debug(f"Using existing
+                    logger.debug(f"Using existing conversation: {conversation_id}")
                 else:
-                    logger.warning(f"
+                    logger.warning(f"Conversation {conversation_id} not found, proceeding without thread")

             if isinstance(user_message, str):
                 logger.debug(f"Executing agent with text input: {user_message[:100]}...")
             else:
                 logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}")
+            # Check if agent supports streaming
+            if hasattr(agent, "run_stream") and callable(agent.run_stream):
+                # Use Agent Framework's native streaming with optional thread
+                if thread:
+                    async for update in agent.run_stream(user_message, thread=thread):
+                        for trace_event in trace_collector.get_pending_events():
+                            yield trace_event

-
-
-
-
-
+                        yield update
+                else:
+                    async for update in agent.run_stream(user_message):
+                        for trace_event in trace_collector.get_pending_events():
+                            yield trace_event
+
+                        yield update
+            elif hasattr(agent, "run") and callable(agent.run):
+                # Non-streaming agent - use run() and yield complete response
+                logger.info("Agent lacks run_stream(), using run() method (non-streaming)")
+                if thread:
+                    response = await agent.run(user_message, thread=thread)
+                else:
+                    response = await agent.run(user_message)

-
-
-
-            for trace_event in trace_collector.get_pending_events():
-                yield trace_event
+                # Yield trace events before response
+                for trace_event in trace_collector.get_pending_events():
+                    yield trace_event

-
+                # Yield the complete response (mapper will convert to streaming events)
+                yield response
+            else:
+                raise ValueError("Agent must implement either run() or run_stream() method")

         except Exception as e:
             logger.error(f"Error in agent execution: {e}")
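The new execution path duck-types the agent: it prefers an async run_stream() generator and falls back to an awaitable run(), raising if neither exists. A minimal sketch of an object that would satisfy these hasattr checks (a hypothetical class for illustration, not part of agent_framework):

from collections.abc import AsyncGenerator


class EchoAgent:
    """Hypothetical agent shape accepted by the hasattr checks above."""

    async def run_stream(self, message: str, thread=None) -> AsyncGenerator[str, None]:
        # Preferred path: iterated with `async for update in agent.run_stream(...)`.
        yield f"echo: {message}"

    async def run(self, message: str, thread=None) -> str:
        # Fallback path when run_stream() is absent; the whole response is yielded once.
        return f"echo: {message}"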
@@ -437,8 +286,8 @@ class AgentFrameworkExecutor:
         try:
             # Get input data - prefer structured data from extra_body
             input_data: str | list[Any] | dict[str, Any]
-            if request.extra_body and
-                input_data = request.extra_body.input_data
+            if request.extra_body and isinstance(request.extra_body, dict) and request.extra_body.get("input_data"):
+                input_data = request.extra_body.get("input_data")  # type: ignore
                 logger.debug(f"Using structured input_data from extra_body: {type(input_data)}")
             else:
                 input_data = request.input
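For reference, a sketch of the request shape the new dict-based check accepts; the keys mirror the hunk above, the values are illustrative only:

# Illustrative payload: extra_body["input_data"] is preferred over the plain input field.
request_payload = {
    "input": "fallback text input",
    "extra_body": {
        "input_data": {"message": "structured input, used when present"},
    },
}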
@@ -465,6 +314,9 @@ class AgentFrameworkExecutor:
     def _convert_input_to_chat_message(self, input_data: Any) -> Any:
         """Convert OpenAI Responses API input to Agent Framework ChatMessage or string.

+        Handles various input formats including text, images, files, and multimodal content.
+        Falls back to string extraction for simple cases.
+
         Args:
             input_data: OpenAI ResponseInputParam (List[ResponseInputItemParam])

@@ -494,6 +346,9 @@ class AgentFrameworkExecutor:
     ) -> Any:
         """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage.

+        Processes text, images, files, and other content types from OpenAI format
+        to Agent Framework ChatMessage with appropriate content objects.
+
         Args:
             input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects)
             ChatMessage: ChatMessage class for creating chat messages
@@ -579,6 +434,40 @@ class AgentFrameworkExecutor:
             elif file_url:
                 contents.append(DataContent(uri=file_url, media_type=media_type))

+            elif content_type == "function_approval_response":
+                # Handle function approval response (DevUI extension)
+                try:
+                    from agent_framework import FunctionApprovalResponseContent, FunctionCallContent
+
+                    request_id = content_item.get("request_id", "")
+                    approved = content_item.get("approved", False)
+                    function_call_data = content_item.get("function_call", {})
+
+                    # Create FunctionCallContent from the function_call data
+                    function_call = FunctionCallContent(
+                        call_id=function_call_data.get("id", ""),
+                        name=function_call_data.get("name", ""),
+                        arguments=function_call_data.get("arguments", {}),
+                    )
+
+                    # Create FunctionApprovalResponseContent with correct signature
+                    approval_response = FunctionApprovalResponseContent(
+                        approved,  # positional argument
+                        id=request_id,  # keyword argument 'id', NOT 'request_id'
+                        function_call=function_call,  # FunctionCallContent object
+                    )
+                    contents.append(approval_response)
+                    logger.info(
+                        f"Added FunctionApprovalResponseContent: id={request_id}, "
+                        f"approved={approved}, call_id={function_call.call_id}"
+                    )
+                except ImportError:
+                    logger.warning(
+                        "FunctionApprovalResponseContent not available in agent_framework"
+                    )
+                except Exception as e:
+                    logger.error(f"Failed to create FunctionApprovalResponseContent: {e}")
+
             # Handle other OpenAI input item types as needed
             # (tool calls, function results, etc.)

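The keys read in this branch imply the following content_item shape (a sketch derived from the .get() calls above; the values are made up):

content_item = {
    "type": "function_approval_response",
    "request_id": "req_123",                     # becomes FunctionApprovalResponseContent(id=...)
    "approved": True,                            # passed positionally
    "function_call": {
        "id": "call_abc",                        # becomes FunctionCallContent.call_id
        "name": "send_email",                    # hypothetical tool name
        "arguments": {"to": "user@example.com"},
    },
}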
@@ -638,6 +527,37 @@ class AgentFrameworkExecutor:
             logger.warning(f"Error parsing workflow input: {e}")
             return raw_input

+    def _get_start_executor_message_types(self, workflow: Any) -> tuple[Any | None, list[Any]]:
+        """Return start executor and its declared input types."""
+        try:
+            start_executor = workflow.get_start_executor()
+        except Exception as exc:  # pragma: no cover - defensive logging path
+            logger.debug(f"Unable to access workflow start executor: {exc}")
+            return None, []
+
+        if not start_executor:
+            return None, []
+
+        message_types: list[Any] = []
+
+        try:
+            input_types = getattr(start_executor, "input_types", None)
+        except Exception as exc:  # pragma: no cover - defensive logging path
+            logger.debug(f"Failed to read executor input_types: {exc}")
+        else:
+            if input_types:
+                message_types = list(input_types)
+
+        if not message_types and hasattr(start_executor, "_handlers"):
+            try:
+                handlers = start_executor._handlers
+                if isinstance(handlers, dict):
+                    message_types = list(handlers.keys())
+            except Exception as exc:  # pragma: no cover - defensive logging path
+                logger.debug(f"Failed to read executor handlers: {exc}")
+
+        return start_executor, message_types
+
     def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, Any]) -> Any:
         """Parse structured input data for workflow execution.

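The new helper reads a public input_types attribute first and only falls back to the private _handlers mapping. A hedged sketch of the two executor shapes it can handle (hypothetical stand-in classes, not the framework's own types):

from dataclasses import dataclass, field
from typing import Any


@dataclass
class ExecutorWithInputTypes:
    # Preferred source: declared input types.
    input_types: list[type] = field(default_factory=lambda: [str])


@dataclass
class ExecutorWithHandlers:
    # Fallback source: handler registry keyed by message type.
    _handlers: dict[type, Any] = field(default_factory=lambda: {dict: None})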
@@ -649,59 +569,32 @@ class AgentFrameworkExecutor:
             Parsed input for workflow
         """
         try:
+            from ._utils import parse_input_for_type
+
             # Get the start executor and its input type
-            start_executor =
-            if not start_executor
+            start_executor, message_types = self._get_start_executor_message_types(workflow)
+            if not start_executor:
                 logger.debug("Cannot determine input type for workflow - using raw dict")
                 return input_data

-            message_types = list(start_executor._handlers.keys())
             if not message_types:
                 logger.debug("No message types found for start executor - using raw dict")
                 return input_data

             # Get the first (primary) input type
-
+            from ._utils import select_primary_input_type

-
-            if input_type is
+            input_type = select_primary_input_type(message_types)
+            if input_type is None:
+                logger.debug("Could not select primary input type for workflow - using raw dict")
                 return input_data

-            #
-
-                try:
-                    if isinstance(input_data, input_type):
-                        return input_data
-                    if "input" in input_data:
-                        return input_type(input_data["input"])
-                    if len(input_data) == 1:
-                        value = next(iter(input_data.values()))
-                        return input_type(value)
-                    return input_data
-                except (ValueError, TypeError) as e:
-                    logger.warning(f"Failed to convert input to {input_type}: {e}")
-                    return input_data
-
-            # If it's a Pydantic model, validate and create instance
-            if hasattr(input_type, "model_validate"):
-                try:
-                    return input_type.model_validate(input_data)
-                except Exception as e:
-                    logger.warning(f"Failed to validate input as {input_type}: {e}")
-                    return input_data
-
-            # If it's a dataclass or other type with annotations
-            elif hasattr(input_type, "__annotations__"):
-                try:
-                    return input_type(**input_data)
-                except Exception as e:
-                    logger.warning(f"Failed to create {input_type} from input data: {e}")
-                    return input_data
+            # Use consolidated parsing logic from _utils
+            return parse_input_for_type(input_data, input_type)

         except Exception as e:
             logger.warning(f"Error parsing structured workflow input: {e}")
-
-            return input_data
+            return input_data

     def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any:
         """Parse raw input string based on workflow's expected input type.
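Both parsing paths now delegate to _utils.parse_input_for_type, and _utils itself is not shown in this diff. The sketch below only approximates the behaviour of the removed inline blocks (direct coercion, Pydantic model_validate, dataclass kwargs) and is not the actual implementation:

import json
from typing import Any


def parse_input_for_type_sketch(data: Any, input_type: type) -> Any:
    """Rough approximation of the consolidated parsing the removed code performed."""
    if isinstance(data, input_type):
        return data
    if isinstance(data, str) and data.strip().startswith("{"):
        if hasattr(input_type, "model_validate_json"):
            return input_type.model_validate_json(data)  # Pydantic model path
        if hasattr(input_type, "__annotations__"):
            return input_type(**json.loads(data))        # dataclass-style path
    if isinstance(data, dict):
        if hasattr(input_type, "model_validate"):
            return input_type.model_validate(data)
        if hasattr(input_type, "__annotations__"):
            return input_type(**data)
    return data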
@@ -714,57 +607,29 @@ class AgentFrameworkExecutor:
             Parsed input for workflow
         """
         try:
+            from ._utils import parse_input_for_type
+
             # Get the start executor and its input type
-            start_executor =
-            if not start_executor
+            start_executor, message_types = self._get_start_executor_message_types(workflow)
+            if not start_executor:
                 logger.debug("Cannot determine input type for workflow - using raw string")
                 return raw_input

-            message_types = list(start_executor._handlers.keys())
             if not message_types:
                 logger.debug("No message types found for start executor - using raw string")
                 return raw_input

             # Get the first (primary) input type
-
+            from ._utils import select_primary_input_type

-
-            if input_type is
+            input_type = select_primary_input_type(message_types)
+            if input_type is None:
+                logger.debug("Could not select primary input type for workflow - using raw string")
                 return raw_input

-            #
-
-                try:
-                    # First try to parse as JSON
-                    if raw_input.strip().startswith("{"):
-                        return input_type.model_validate_json(raw_input)
-
-                    # Try common field names
-                    common_fields = ["message", "text", "input", "data", "content"]
-                    for field in common_fields:
-                        try:
-                            return input_type(**{field: raw_input})
-                        except Exception as e:
-                            logger.debug(f"Failed to parse input using field '{field}': {e}")
-                            continue
-
-                    # Last resort: try default constructor
-                    return input_type()
-
-                except Exception as e:
-                    logger.debug(f"Failed to parse input as {input_type}: {e}")
-
-            # If it's a dataclass, try JSON parsing
-            elif hasattr(input_type, "__annotations__"):
-                try:
-                    if raw_input.strip().startswith("{"):
-                        parsed = json.loads(raw_input)
-                        return input_type(**parsed)
-                except Exception as e:
-                    logger.debug(f"Failed to parse input as {input_type}: {e}")
+            # Use consolidated parsing logic from _utils
+            return parse_input_for_type(raw_input, input_type)

         except Exception as e:
-            logger.debug(f"Error
-
-            # Fallback: return raw string
-            return raw_input
+            logger.debug(f"Error parsing workflow input: {e}")
+            return raw_input