agent-framework-devui 0.0.1a0__py3-none-any.whl → 1.0.0b251001__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agent-framework-devui might be problematic; see the package registry advisory for more details.

@@ -0,0 +1,770 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+ """Agent Framework executor implementation."""
4
+
5
+ import json
6
+ import logging
7
+ import os
8
+ import uuid
9
+ from collections.abc import AsyncGenerator
10
+ from typing import Any
11
+
12
+ from agent_framework import AgentThread
13
+
14
+ from ._discovery import EntityDiscovery
15
+ from ._mapper import MessageMapper
16
+ from ._tracing import capture_traces
17
+ from .models import AgentFrameworkRequest, OpenAIResponse
18
+ from .models._discovery_models import EntityInfo
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
class EntityNotFoundError(Exception):
    """Raised when a requested entity cannot be found."""
27
+
28
+
29
class AgentFrameworkExecutor:
    """Executor for Agent Framework entities - agents and workflows."""

    def __init__(self, entity_discovery: EntityDiscovery, message_mapper: MessageMapper):
        """Initialize Agent Framework executor.

        Args:
            entity_discovery: Entity discovery instance
            message_mapper: Message mapper instance
        """
        self.entity_discovery = entity_discovery
        self.message_mapper = message_mapper

        # Tracing must be configured before any entity is executed.
        self._setup_tracing_provider()
        self._setup_agent_framework_tracing()

        # In-memory thread bookkeeping only — no extra metadata is kept.
        self.thread_storage: dict[str, AgentThread] = {}  # thread_id -> AgentThread
        self.agent_threads: dict[str, list[str]] = {}  # agent_id -> thread_ids
47
+
48
+ def _setup_tracing_provider(self) -> None:
49
+ """Set up our own TracerProvider so we can add processors."""
50
+ try:
51
+ from opentelemetry import trace
52
+ from opentelemetry.sdk.resources import Resource
53
+ from opentelemetry.sdk.trace import TracerProvider
54
+
55
+ # Only set up if no provider exists yet
56
+ if not hasattr(trace, "_TRACER_PROVIDER") or trace._TRACER_PROVIDER is None:
57
+ resource = Resource.create({
58
+ "service.name": "agent-framework-server",
59
+ "service.version": "1.0.0",
60
+ })
61
+ provider = TracerProvider(resource=resource)
62
+ trace.set_tracer_provider(provider)
63
+ logger.info("Set up TracerProvider for server tracing")
64
+ else:
65
+ logger.debug("TracerProvider already exists")
66
+
67
+ except ImportError:
68
+ logger.debug("OpenTelemetry not available")
69
+ except Exception as e:
70
+ logger.warning(f"Failed to setup TracerProvider: {e}")
71
+
72
+ def _setup_agent_framework_tracing(self) -> None:
73
+ """Set up Agent Framework's built-in tracing."""
74
+ # Configure Agent Framework tracing only if ENABLE_OTEL is set
75
+ if os.environ.get("ENABLE_OTEL"):
76
+ try:
77
+ from agent_framework.observability import setup_observability
78
+
79
+ setup_observability(enable_sensitive_data=True)
80
+ logger.info("Enabled Agent Framework observability")
81
+ except Exception as e:
82
+ logger.warning(f"Failed to enable Agent Framework observability: {e}")
83
+ else:
84
+ logger.debug("ENABLE_OTEL not set, skipping observability setup")
85
+
86
+ # Thread Management Methods
87
+ def create_thread(self, agent_id: str) -> str:
88
+ """Create new thread for agent."""
89
+ thread_id = f"thread_{uuid.uuid4().hex[:8]}"
90
+ thread = AgentThread()
91
+
92
+ self.thread_storage[thread_id] = thread
93
+
94
+ if agent_id not in self.agent_threads:
95
+ self.agent_threads[agent_id] = []
96
+ self.agent_threads[agent_id].append(thread_id)
97
+
98
+ return thread_id
99
+
100
+ def get_thread(self, thread_id: str) -> AgentThread | None:
101
+ """Get AgentThread by ID."""
102
+ return self.thread_storage.get(thread_id)
103
+
104
+ def list_threads_for_agent(self, agent_id: str) -> list[str]:
105
+ """List thread IDs for agent."""
106
+ return self.agent_threads.get(agent_id, [])
107
+
108
+ def get_agent_for_thread(self, thread_id: str) -> str | None:
109
+ """Find which agent owns this thread."""
110
+ for agent_id, thread_ids in self.agent_threads.items():
111
+ if thread_id in thread_ids:
112
+ return agent_id
113
+ return None
114
+
115
+ def delete_thread(self, thread_id: str) -> bool:
116
+ """Delete thread."""
117
+ if thread_id not in self.thread_storage:
118
+ return False
119
+
120
+ for _agent_id, thread_ids in self.agent_threads.items():
121
+ if thread_id in thread_ids:
122
+ thread_ids.remove(thread_id)
123
+ break
124
+
125
+ del self.thread_storage[thread_id]
126
+ return True
127
+
128
+ async def get_thread_messages(self, thread_id: str) -> list[dict[str, Any]]:
129
+ """Get messages from a thread's message store, preserving all content types for UI display."""
130
+ thread = self.get_thread(thread_id)
131
+ if not thread or not thread.message_store:
132
+ return []
133
+
134
+ try:
135
+ # Get AgentFramework ChatMessage objects from thread
136
+ af_messages = await thread.message_store.list_messages()
137
+
138
+ ui_messages = []
139
+ for i, af_msg in enumerate(af_messages):
140
+ # Extract role value (handle enum)
141
+ role = af_msg.role.value if hasattr(af_msg.role, "value") else str(af_msg.role)
142
+
143
+ # Skip tool/function messages - only show user and assistant messages
144
+ if role not in ["user", "assistant"]:
145
+ continue
146
+
147
+ # Extract all user-facing content (text, images, files, etc.)
148
+ display_contents = self._extract_display_contents(af_msg.contents)
149
+
150
+ # Skip messages with no displayable content
151
+ if not display_contents:
152
+ continue
153
+
154
+ ui_message = {
155
+ "id": af_msg.message_id or f"restored-{i}",
156
+ "role": role,
157
+ "contents": display_contents,
158
+ "timestamp": __import__("datetime").datetime.now().isoformat(),
159
+ "author_name": af_msg.author_name,
160
+ "message_id": af_msg.message_id,
161
+ }
162
+
163
+ ui_messages.append(ui_message)
164
+
165
+ logger.info(f"Restored {len(ui_messages)} display messages for thread {thread_id}")
166
+ return ui_messages
167
+
168
+ except Exception as e:
169
+ logger.error(f"Error getting thread messages: {e}")
170
+ import traceback
171
+
172
+ logger.error(traceback.format_exc())
173
+ return []
174
+
175
+ def _extract_display_contents(self, contents: list[Any]) -> list[dict[str, Any]]:
176
+ """Extract all user-facing content (text, images, files, etc.) from message contents.
177
+
178
+ Filters out internal mechanics like function calls/results while preserving
179
+ all content types that should be displayed in the UI.
180
+ """
181
+ display_contents = []
182
+
183
+ for content in contents:
184
+ content_type = getattr(content, "type", None)
185
+
186
+ # Text content
187
+ if content_type == "text":
188
+ text = getattr(content, "text", "")
189
+
190
+ # Handle double-encoded JSON from user messages
191
+ if text.startswith('{"role":'):
192
+ try:
193
+ import json
194
+
195
+ parsed = json.loads(text)
196
+ if parsed.get("contents"):
197
+ for sub_content in parsed["contents"]:
198
+ if sub_content.get("type") == "text":
199
+ display_contents.append({"type": "text", "text": sub_content.get("text", "")})
200
+ except Exception:
201
+ display_contents.append({"type": "text", "text": text})
202
+ else:
203
+ display_contents.append({"type": "text", "text": text})
204
+
205
+ # Data content (images, files, PDFs, etc.)
206
+ elif content_type == "data":
207
+ display_contents.append({
208
+ "type": "data",
209
+ "uri": getattr(content, "uri", ""),
210
+ "media_type": getattr(content, "media_type", None),
211
+ })
212
+
213
+ # URI content (external links to images/files)
214
+ elif content_type == "uri":
215
+ display_contents.append({
216
+ "type": "uri",
217
+ "uri": getattr(content, "uri", ""),
218
+ "media_type": getattr(content, "media_type", None),
219
+ })
220
+
221
+ # Skip function_call, function_result, and other internal content types
222
+
223
+ return display_contents
224
+
225
+ async def serialize_thread(self, thread_id: str) -> dict[str, Any] | None:
226
+ """Serialize thread state for persistence."""
227
+ thread = self.get_thread(thread_id)
228
+ if not thread:
229
+ return None
230
+
231
+ try:
232
+ # Use AgentThread's built-in serialization
233
+ serialized_state = await thread.serialize()
234
+
235
+ # Add our metadata
236
+ agent_id = self.get_agent_for_thread(thread_id)
237
+ serialized_state["metadata"] = {"agent_id": agent_id, "thread_id": thread_id}
238
+
239
+ return serialized_state
240
+
241
+ except Exception as e:
242
+ logger.error(f"Error serializing thread {thread_id}: {e}")
243
+ return None
244
+
245
+ async def deserialize_thread(self, thread_id: str, agent_id: str, serialized_state: dict[str, Any]) -> bool:
246
+ """Deserialize thread state from persistence."""
247
+ try:
248
+ thread = await AgentThread.deserialize(serialized_state)
249
+ # Store the restored thread
250
+ self.thread_storage[thread_id] = thread
251
+ if agent_id not in self.agent_threads:
252
+ self.agent_threads[agent_id] = []
253
+ self.agent_threads[agent_id].append(thread_id)
254
+
255
+ return True
256
+
257
+ except Exception as e:
258
+ logger.error(f"Error deserializing thread {thread_id}: {e}")
259
+ return False
260
+
261
+ async def discover_entities(self) -> list[EntityInfo]:
262
+ """Discover all available entities.
263
+
264
+ Returns:
265
+ List of discovered entities
266
+ """
267
+ return await self.entity_discovery.discover_entities()
268
+
269
+ def get_entity_info(self, entity_id: str) -> EntityInfo:
270
+ """Get entity information.
271
+
272
+ Args:
273
+ entity_id: Entity identifier
274
+
275
+ Returns:
276
+ Entity information
277
+
278
+ Raises:
279
+ EntityNotFoundError: If entity is not found
280
+ """
281
+ entity_info = self.entity_discovery.get_entity_info(entity_id)
282
+ if entity_info is None:
283
+ raise EntityNotFoundError(f"Entity '{entity_id}' not found")
284
+ return entity_info
285
+
286
+ async def execute_streaming(self, request: AgentFrameworkRequest) -> AsyncGenerator[Any, None]:
287
+ """Execute request and stream results in OpenAI format.
288
+
289
+ Args:
290
+ request: Request to execute
291
+
292
+ Yields:
293
+ OpenAI response stream events
294
+ """
295
+ try:
296
+ entity_id = request.get_entity_id()
297
+ if not entity_id:
298
+ logger.error("No entity_id specified in request")
299
+ return
300
+
301
+ # Validate entity exists
302
+ if not self.entity_discovery.get_entity_info(entity_id):
303
+ logger.error(f"Entity '{entity_id}' not found")
304
+ return
305
+
306
+ # Execute entity and convert events
307
+ async for raw_event in self.execute_entity(entity_id, request):
308
+ openai_events = await self.message_mapper.convert_event(raw_event, request)
309
+ for event in openai_events:
310
+ yield event
311
+
312
+ except Exception as e:
313
+ logger.exception(f"Error in streaming execution: {e}")
314
+ # Could yield error event here
315
+
316
+ async def execute_sync(self, request: AgentFrameworkRequest) -> OpenAIResponse:
317
+ """Execute request synchronously and return complete response.
318
+
319
+ Args:
320
+ request: Request to execute
321
+
322
+ Returns:
323
+ Final aggregated OpenAI response
324
+ """
325
+ # Collect all streaming events
326
+ events = [event async for event in self.execute_streaming(request)]
327
+
328
+ # Aggregate into final response
329
+ return await self.message_mapper.aggregate_to_response(events, request)
330
+
331
+ async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) -> AsyncGenerator[Any, None]:
332
+ """Execute the entity and yield raw Agent Framework events plus trace events.
333
+
334
+ Args:
335
+ entity_id: ID of entity to execute
336
+ request: Request to execute
337
+
338
+ Yields:
339
+ Raw Agent Framework events and trace events
340
+ """
341
+ try:
342
+ # Get entity info and object
343
+ entity_info = self.get_entity_info(entity_id)
344
+ entity_obj = self.entity_discovery.get_entity_object(entity_id)
345
+
346
+ if not entity_obj:
347
+ raise EntityNotFoundError(f"Entity object for '{entity_id}' not found")
348
+
349
+ logger.info(f"Executing {entity_info.type}: {entity_id}")
350
+
351
+ # Extract session_id from request for trace context
352
+ session_id = getattr(request.extra_body, "session_id", None) if request.extra_body else None
353
+
354
+ # Use simplified trace capture
355
+ with capture_traces(session_id=session_id, entity_id=entity_id) as trace_collector:
356
+ if entity_info.type == "agent":
357
+ async for event in self._execute_agent(entity_obj, request, trace_collector):
358
+ yield event
359
+ elif entity_info.type == "workflow":
360
+ async for event in self._execute_workflow(entity_obj, request, trace_collector):
361
+ yield event
362
+ else:
363
+ raise ValueError(f"Unsupported entity type: {entity_info.type}")
364
+
365
+ # Yield any remaining trace events after execution completes
366
+ for trace_event in trace_collector.get_pending_events():
367
+ yield trace_event
368
+
369
+ except Exception as e:
370
+ logger.exception(f"Error executing entity {entity_id}: {e}")
371
+ # Yield error event
372
+ yield {"type": "error", "message": str(e), "entity_id": entity_id}
373
+
374
+ async def _execute_agent(
375
+ self, agent: Any, request: AgentFrameworkRequest, trace_collector: Any
376
+ ) -> AsyncGenerator[Any, None]:
377
+ """Execute Agent Framework agent with trace collection and optional thread support.
378
+
379
+ Args:
380
+ agent: Agent object to execute
381
+ request: Request to execute
382
+ trace_collector: Trace collector to get events from
383
+
384
+ Yields:
385
+ Agent update events and trace events
386
+ """
387
+ try:
388
+ # Convert input to proper ChatMessage or string
389
+ user_message = self._convert_input_to_chat_message(request.input)
390
+
391
+ # Get thread if provided in extra_body
392
+ thread = None
393
+ if request.extra_body and hasattr(request.extra_body, "thread_id") and request.extra_body.thread_id:
394
+ thread_id = request.extra_body.thread_id
395
+ thread = self.get_thread(thread_id)
396
+ if thread:
397
+ logger.debug(f"Using existing thread: {thread_id}")
398
+ else:
399
+ logger.warning(f"Thread {thread_id} not found, proceeding without thread")
400
+
401
+ if isinstance(user_message, str):
402
+ logger.debug(f"Executing agent with text input: {user_message[:100]}...")
403
+ else:
404
+ logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}")
405
+
406
+ # Use Agent Framework's native streaming with optional thread
407
+ if thread:
408
+ async for update in agent.run_stream(user_message, thread=thread):
409
+ for trace_event in trace_collector.get_pending_events():
410
+ yield trace_event
411
+
412
+ yield update
413
+ else:
414
+ async for update in agent.run_stream(user_message):
415
+ for trace_event in trace_collector.get_pending_events():
416
+ yield trace_event
417
+
418
+ yield update
419
+
420
+ except Exception as e:
421
+ logger.error(f"Error in agent execution: {e}")
422
+ yield {"type": "error", "message": f"Agent execution error: {e!s}"}
423
+
424
+ async def _execute_workflow(
425
+ self, workflow: Any, request: AgentFrameworkRequest, trace_collector: Any
426
+ ) -> AsyncGenerator[Any, None]:
427
+ """Execute Agent Framework workflow with trace collection.
428
+
429
+ Args:
430
+ workflow: Workflow object to execute
431
+ request: Request to execute
432
+ trace_collector: Trace collector to get events from
433
+
434
+ Yields:
435
+ Workflow events and trace events
436
+ """
437
+ try:
438
+ # Get input data - prefer structured data from extra_body
439
+ input_data: str | list[Any] | dict[str, Any]
440
+ if request.extra_body and hasattr(request.extra_body, "input_data") and request.extra_body.input_data:
441
+ input_data = request.extra_body.input_data
442
+ logger.debug(f"Using structured input_data from extra_body: {type(input_data)}")
443
+ else:
444
+ input_data = request.input
445
+ logger.debug(f"Using input field as fallback: {type(input_data)}")
446
+
447
+ # Parse input based on workflow's expected input type
448
+ parsed_input = await self._parse_workflow_input(workflow, input_data)
449
+
450
+ logger.debug(f"Executing workflow with parsed input type: {type(parsed_input)}")
451
+
452
+ # Use Agent Framework workflow's native streaming
453
+ async for event in workflow.run_stream(parsed_input):
454
+ # Yield any pending trace events first
455
+ for trace_event in trace_collector.get_pending_events():
456
+ yield trace_event
457
+
458
+ # Then yield the workflow event
459
+ yield event
460
+
461
+ except Exception as e:
462
+ logger.error(f"Error in workflow execution: {e}")
463
+ yield {"type": "error", "message": f"Workflow execution error: {e!s}"}
464
+
465
+ def _convert_input_to_chat_message(self, input_data: Any) -> Any:
466
+ """Convert OpenAI Responses API input to Agent Framework ChatMessage or string.
467
+
468
+ Args:
469
+ input_data: OpenAI ResponseInputParam (List[ResponseInputItemParam])
470
+
471
+ Returns:
472
+ ChatMessage for multimodal content, or string for simple text
473
+ """
474
+ # Import Agent Framework types
475
+ try:
476
+ from agent_framework import ChatMessage, DataContent, Role, TextContent
477
+ except ImportError:
478
+ # Fallback to string extraction if Agent Framework not available
479
+ return self._extract_user_message_fallback(input_data)
480
+
481
+ # Handle simple string input (backward compatibility)
482
+ if isinstance(input_data, str):
483
+ return input_data
484
+
485
+ # Handle OpenAI ResponseInputParam (List[ResponseInputItemParam])
486
+ if isinstance(input_data, list):
487
+ return self._convert_openai_input_to_chat_message(input_data, ChatMessage, TextContent, DataContent, Role)
488
+
489
+ # Fallback for other formats
490
+ return self._extract_user_message_fallback(input_data)
491
+
492
+ def _convert_openai_input_to_chat_message(
493
+ self, input_items: list[Any], ChatMessage: Any, TextContent: Any, DataContent: Any, Role: Any
494
+ ) -> Any:
495
+ """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage.
496
+
497
+ Args:
498
+ input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects)
499
+ ChatMessage: ChatMessage class for creating chat messages
500
+ TextContent: TextContent class for text content
501
+ DataContent: DataContent class for data/media content
502
+ Role: Role enum for message roles
503
+
504
+ Returns:
505
+ ChatMessage with converted content
506
+ """
507
+ contents = []
508
+
509
+ # Process each input item
510
+ for item in input_items:
511
+ # Handle dict format (from JSON)
512
+ if isinstance(item, dict):
513
+ item_type = item.get("type")
514
+ if item_type == "message":
515
+ # Extract content from OpenAI message
516
+ message_content = item.get("content", [])
517
+
518
+ # Handle both string content and list content
519
+ if isinstance(message_content, str):
520
+ contents.append(TextContent(text=message_content))
521
+ elif isinstance(message_content, list):
522
+ for content_item in message_content:
523
+ # Handle dict content items
524
+ if isinstance(content_item, dict):
525
+ content_type = content_item.get("type")
526
+
527
+ if content_type == "input_text":
528
+ text = content_item.get("text", "")
529
+ contents.append(TextContent(text=text))
530
+
531
+ elif content_type == "input_image":
532
+ image_url = content_item.get("image_url", "")
533
+ if image_url:
534
+ # Extract media type from data URI if possible
535
+ # Parse media type from data URL, fallback to image/png
536
+ if image_url.startswith("data:"):
537
+ try:
538
+ # Extract media type from data:image/jpeg;base64,... format
539
+ media_type = image_url.split(";")[0].split(":")[1]
540
+ except (IndexError, AttributeError):
541
+ logger.warning(
542
+ f"Failed to parse media type from data URL: {image_url[:30]}..."
543
+ )
544
+ media_type = "image/png"
545
+ else:
546
+ media_type = "image/png"
547
+ contents.append(DataContent(uri=image_url, media_type=media_type))
548
+
549
+ elif content_type == "input_file":
550
+ # Handle file input
551
+ file_data = content_item.get("file_data")
552
+ file_url = content_item.get("file_url")
553
+ filename = content_item.get("filename", "")
554
+
555
+ # Determine media type from filename
556
+ media_type = "application/octet-stream" # default
557
+ if filename:
558
+ if filename.lower().endswith(".pdf"):
559
+ media_type = "application/pdf"
560
+ elif filename.lower().endswith((".png", ".jpg", ".jpeg", ".gif")):
561
+ media_type = f"image/{filename.split('.')[-1].lower()}"
562
+ elif filename.lower().endswith((
563
+ ".wav",
564
+ ".mp3",
565
+ ".m4a",
566
+ ".ogg",
567
+ ".flac",
568
+ ".aac",
569
+ )):
570
+ ext = filename.split(".")[-1].lower()
571
+ # Normalize extensions to match audio MIME types
572
+ media_type = "audio/mp4" if ext == "m4a" else f"audio/{ext}"
573
+
574
+ # Use file_data or file_url
575
+ if file_data:
576
+ # Assume file_data is base64, create data URI
577
+ data_uri = f"data:{media_type};base64,{file_data}"
578
+ contents.append(DataContent(uri=data_uri, media_type=media_type))
579
+ elif file_url:
580
+ contents.append(DataContent(uri=file_url, media_type=media_type))
581
+
582
+ # Handle other OpenAI input item types as needed
583
+ # (tool calls, function results, etc.)
584
+
585
+ # If no contents found, create a simple text message
586
+ if not contents:
587
+ contents.append(TextContent(text=""))
588
+
589
+ chat_message = ChatMessage(role=Role.USER, contents=contents)
590
+
591
+ logger.info(f"Created ChatMessage with {len(contents)} contents:")
592
+ for idx, content in enumerate(contents):
593
+ content_type = content.__class__.__name__
594
+ if hasattr(content, "media_type"):
595
+ logger.info(f" [{idx}] {content_type} - media_type: {content.media_type}")
596
+ else:
597
+ logger.info(f" [{idx}] {content_type}")
598
+
599
+ return chat_message
600
+
601
+ def _extract_user_message_fallback(self, input_data: Any) -> str:
602
+ """Fallback method to extract user message as string.
603
+
604
+ Args:
605
+ input_data: Input data in various formats
606
+
607
+ Returns:
608
+ Extracted user message string
609
+ """
610
+ if isinstance(input_data, str):
611
+ return input_data
612
+ if isinstance(input_data, dict):
613
+ # Try common field names
614
+ for field in ["message", "text", "input", "content", "query"]:
615
+ if field in input_data:
616
+ return str(input_data[field])
617
+ # Fallback to JSON string
618
+ return json.dumps(input_data)
619
+ return str(input_data)
620
+
621
+ async def _parse_workflow_input(self, workflow: Any, raw_input: Any) -> Any:
622
+ """Parse input based on workflow's expected input type.
623
+
624
+ Args:
625
+ workflow: Workflow object
626
+ raw_input: Raw input data
627
+
628
+ Returns:
629
+ Parsed input appropriate for the workflow
630
+ """
631
+ try:
632
+ # Handle structured input
633
+ if isinstance(raw_input, dict):
634
+ return self._parse_structured_workflow_input(workflow, raw_input)
635
+ return self._parse_raw_workflow_input(workflow, str(raw_input))
636
+
637
+ except Exception as e:
638
+ logger.warning(f"Error parsing workflow input: {e}")
639
+ return raw_input
640
+
641
+ def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, Any]) -> Any:
642
+ """Parse structured input data for workflow execution.
643
+
644
+ Args:
645
+ workflow: Workflow object
646
+ input_data: Structured input data
647
+
648
+ Returns:
649
+ Parsed input for workflow
650
+ """
651
+ try:
652
+ # Get the start executor and its input type
653
+ start_executor = workflow.get_start_executor()
654
+ if not start_executor or not hasattr(start_executor, "_handlers"):
655
+ logger.debug("Cannot determine input type for workflow - using raw dict")
656
+ return input_data
657
+
658
+ message_types = list(start_executor._handlers.keys())
659
+ if not message_types:
660
+ logger.debug("No message types found for start executor - using raw dict")
661
+ return input_data
662
+
663
+ # Get the first (primary) input type
664
+ input_type = message_types[0]
665
+
666
+ # If input type is dict, return as-is
667
+ if input_type is dict:
668
+ return input_data
669
+
670
+ # Handle primitive types
671
+ if input_type in (str, int, float, bool):
672
+ try:
673
+ if isinstance(input_data, input_type):
674
+ return input_data
675
+ if "input" in input_data:
676
+ return input_type(input_data["input"])
677
+ if len(input_data) == 1:
678
+ value = next(iter(input_data.values()))
679
+ return input_type(value)
680
+ return input_data
681
+ except (ValueError, TypeError) as e:
682
+ logger.warning(f"Failed to convert input to {input_type}: {e}")
683
+ return input_data
684
+
685
+ # If it's a Pydantic model, validate and create instance
686
+ if hasattr(input_type, "model_validate"):
687
+ try:
688
+ return input_type.model_validate(input_data)
689
+ except Exception as e:
690
+ logger.warning(f"Failed to validate input as {input_type}: {e}")
691
+ return input_data
692
+
693
+ # If it's a dataclass or other type with annotations
694
+ elif hasattr(input_type, "__annotations__"):
695
+ try:
696
+ return input_type(**input_data)
697
+ except Exception as e:
698
+ logger.warning(f"Failed to create {input_type} from input data: {e}")
699
+ return input_data
700
+
701
+ except Exception as e:
702
+ logger.warning(f"Error parsing structured workflow input: {e}")
703
+
704
+ return input_data
705
+
706
+ def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any:
707
+ """Parse raw input string based on workflow's expected input type.
708
+
709
+ Args:
710
+ workflow: Workflow object
711
+ raw_input: Raw input string
712
+
713
+ Returns:
714
+ Parsed input for workflow
715
+ """
716
+ try:
717
+ # Get the start executor and its input type
718
+ start_executor = workflow.get_start_executor()
719
+ if not start_executor or not hasattr(start_executor, "_handlers"):
720
+ logger.debug("Cannot determine input type for workflow - using raw string")
721
+ return raw_input
722
+
723
+ message_types = list(start_executor._handlers.keys())
724
+ if not message_types:
725
+ logger.debug("No message types found for start executor - using raw string")
726
+ return raw_input
727
+
728
+ # Get the first (primary) input type
729
+ input_type = message_types[0]
730
+
731
+ # If input type is str, return as-is
732
+ if input_type is str:
733
+ return raw_input
734
+
735
+ # If it's a Pydantic model, try to parse JSON
736
+ if hasattr(input_type, "model_validate_json"):
737
+ try:
738
+ # First try to parse as JSON
739
+ if raw_input.strip().startswith("{"):
740
+ return input_type.model_validate_json(raw_input)
741
+
742
+ # Try common field names
743
+ common_fields = ["message", "text", "input", "data", "content"]
744
+ for field in common_fields:
745
+ try:
746
+ return input_type(**{field: raw_input})
747
+ except Exception as e:
748
+ logger.debug(f"Failed to parse input using field '{field}': {e}")
749
+ continue
750
+
751
+ # Last resort: try default constructor
752
+ return input_type()
753
+
754
+ except Exception as e:
755
+ logger.debug(f"Failed to parse input as {input_type}: {e}")
756
+
757
+ # If it's a dataclass, try JSON parsing
758
+ elif hasattr(input_type, "__annotations__"):
759
+ try:
760
+ if raw_input.strip().startswith("{"):
761
+ parsed = json.loads(raw_input)
762
+ return input_type(**parsed)
763
+ except Exception as e:
764
+ logger.debug(f"Failed to parse input as {input_type}: {e}")
765
+
766
+ except Exception as e:
767
+ logger.debug(f"Error determining workflow input type: {e}")
768
+
769
+ # Fallback: return raw string
770
+ return raw_input