agent-framework-devui 0.0.1a0__py3-none-any.whl → 1.0.0b251007__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agent-framework-devui might be problematic. Click here for more details.

@@ -0,0 +1,777 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+ """Agent Framework executor implementation."""
4
+
5
+ import json
6
+ import logging
7
+ import os
8
+ import uuid
9
+ from collections.abc import AsyncGenerator
10
+ from typing import Any, get_origin
11
+
12
+ from agent_framework import AgentThread
13
+
14
+ from ._discovery import EntityDiscovery
15
+ from ._mapper import MessageMapper
16
+ from ._tracing import capture_traces
17
+ from .models import AgentFrameworkRequest, OpenAIResponse
18
+ from .models._discovery_models import EntityInfo
19
+
20
+ logger = logging.getLogger(__name__)
21
+
22
+
23
class EntityNotFoundError(Exception):
    """Raised when an entity is not found."""
27
+
28
+
29
class AgentFrameworkExecutor:
    """Executor for Agent Framework entities - agents and workflows."""

    def __init__(self, entity_discovery: EntityDiscovery, message_mapper: MessageMapper):
        """Initialize Agent Framework executor.

        Args:
            entity_discovery: Entity discovery instance
            message_mapper: Message mapper instance
        """
        self.entity_discovery = entity_discovery
        self.message_mapper = message_mapper

        # Minimal in-memory thread bookkeeping: ids map straight to AgentThread
        # objects, plus a reverse index of which agent owns which threads.
        self.thread_storage: dict[str, AgentThread] = {}
        self.agent_threads: dict[str, list[str]] = {}  # agent_id -> thread_ids

        self._setup_tracing_provider()
        self._setup_agent_framework_tracing()
47
+
48
def _setup_tracing_provider(self) -> None:
    """Set up our own TracerProvider so we can add processors."""
    try:
        from opentelemetry import trace
        from opentelemetry.sdk.resources import Resource
        from opentelemetry.sdk.trace import TracerProvider

        # Respect a provider that some other component already installed.
        if getattr(trace, "_TRACER_PROVIDER", None) is not None:
            logger.debug("TracerProvider already exists")
            return

        service_resource = Resource.create({
            "service.name": "agent-framework-server",
            "service.version": "1.0.0",
        })
        trace.set_tracer_provider(TracerProvider(resource=service_resource))
        logger.info("Set up TracerProvider for server tracing")

    except ImportError:
        # Tracing is optional - absence of the SDK is not an error.
        logger.debug("OpenTelemetry not available")
    except Exception as e:
        logger.warning(f"Failed to setup TracerProvider: {e}")
71
+
72
def _setup_agent_framework_tracing(self) -> None:
    """Set up Agent Framework's built-in tracing."""
    # Observability is opt-in via the ENABLE_OTEL environment variable.
    if not os.environ.get("ENABLE_OTEL"):
        logger.debug("ENABLE_OTEL not set, skipping observability setup")
        return
    try:
        from agent_framework.observability import setup_observability

        setup_observability(enable_sensitive_data=True)
        logger.info("Enabled Agent Framework observability")
    except Exception as e:
        logger.warning(f"Failed to enable Agent Framework observability: {e}")
85
+
86
# Thread Management Methods
def create_thread(self, agent_id: str) -> str:
    """Create new thread for agent."""
    thread_id = f"thread_{uuid.uuid4().hex[:8]}"
    self.thread_storage[thread_id] = AgentThread()
    # Register the new thread under its owning agent.
    self.agent_threads.setdefault(agent_id, []).append(thread_id)
    return thread_id
99
+
100
def get_thread(self, thread_id: str) -> AgentThread | None:
    """Get AgentThread by ID."""
    # Unknown ids simply yield None - callers treat that as "no thread".
    if thread_id in self.thread_storage:
        return self.thread_storage[thread_id]
    return None
103
+
104
def list_threads_for_agent(self, agent_id: str) -> list[str]:
    """List thread IDs for agent."""
    # Unknown agents have no threads; return an empty list rather than raising.
    threads = self.agent_threads.get(agent_id)
    return threads if threads is not None else []
107
+
108
def get_agent_for_thread(self, thread_id: str) -> str | None:
    """Find which agent owns this thread."""
    # Linear scan of the reverse index; None when no agent claims the thread.
    return next(
        (owner for owner, thread_ids in self.agent_threads.items() if thread_id in thread_ids),
        None,
    )
114
+
115
def delete_thread(self, thread_id: str) -> bool:
    """Delete thread."""
    if thread_id not in self.thread_storage:
        return False

    # Unlink the id from whichever agent's list holds it (at most one does).
    for thread_ids in self.agent_threads.values():
        if thread_id in thread_ids:
            thread_ids.remove(thread_id)
            break

    del self.thread_storage[thread_id]
    return True
127
+
128
async def get_thread_messages(self, thread_id: str) -> list[dict[str, Any]]:
    """Get messages from a thread's message store, preserving all content types for UI display.

    Args:
        thread_id: Identifier of the thread to read messages from.

    Returns:
        UI-ready message dicts (user/assistant roles only). Empty list when the
        thread is unknown, has no message store, or reading the store fails.
    """
    # Import once instead of the per-message __import__("datetime") hack.
    from datetime import datetime

    thread = self.get_thread(thread_id)
    if not thread or not thread.message_store:
        return []

    try:
        # Get AgentFramework ChatMessage objects from thread
        af_messages = await thread.message_store.list_messages()

        ui_messages = []
        for i, af_msg in enumerate(af_messages):
            # Extract role value (handle enum)
            role = af_msg.role.value if hasattr(af_msg.role, "value") else str(af_msg.role)

            # Skip tool/function messages - only show user and assistant messages
            if role not in ("user", "assistant"):
                continue

            # Extract all user-facing content (text, images, files, etc.)
            display_contents = self._extract_display_contents(af_msg.contents)

            # Skip messages with no displayable content
            if not display_contents:
                continue

            # Extract usage information from the first usage content that
            # carries details. NOTE(review): original indentation was lost;
            # assumes the scan stops at the first populated usage entry.
            usage_data = None
            for content in af_msg.contents:
                if getattr(content, "type", None) == "usage":
                    details = getattr(content, "details", None)
                    if details:
                        usage_data = {
                            "total_tokens": getattr(details, "total_token_count", 0) or 0,
                            "prompt_tokens": getattr(details, "input_token_count", 0) or 0,
                            "completion_tokens": getattr(details, "output_token_count", 0) or 0,
                        }
                        break

            ui_message = {
                "id": af_msg.message_id or f"restored-{i}",
                "role": role,
                "contents": display_contents,
                # Restoration time, not original send time (not persisted).
                "timestamp": datetime.now().isoformat(),
                "author_name": af_msg.author_name,
                "message_id": af_msg.message_id,
            }

            # Add usage data if available
            if usage_data:
                ui_message["usage"] = usage_data

            ui_messages.append(ui_message)

        logger.info(f"Restored {len(ui_messages)} display messages for thread {thread_id}")
        return ui_messages

    except Exception as e:
        logger.error(f"Error getting thread messages: {e}")
        import traceback

        logger.error(traceback.format_exc())
        return []
192
+
193
+ def _extract_display_contents(self, contents: list[Any]) -> list[dict[str, Any]]:
194
+ """Extract all user-facing content (text, images, files, etc.) from message contents.
195
+
196
+ Filters out internal mechanics like function calls/results while preserving
197
+ all content types that should be displayed in the UI.
198
+ """
199
+ display_contents = []
200
+
201
+ for content in contents:
202
+ content_type = getattr(content, "type", None)
203
+
204
+ # Text content
205
+ if content_type == "text":
206
+ text = getattr(content, "text", "")
207
+
208
+ # Handle double-encoded JSON from user messages
209
+ if text.startswith('{"role":'):
210
+ try:
211
+ import json
212
+
213
+ parsed = json.loads(text)
214
+ if parsed.get("contents"):
215
+ for sub_content in parsed["contents"]:
216
+ if sub_content.get("type") == "text":
217
+ display_contents.append({"type": "text", "text": sub_content.get("text", "")})
218
+ except Exception:
219
+ display_contents.append({"type": "text", "text": text})
220
+ else:
221
+ display_contents.append({"type": "text", "text": text})
222
+
223
+ # Data content (images, files, PDFs, etc.)
224
+ elif content_type == "data":
225
+ display_contents.append({
226
+ "type": "data",
227
+ "uri": getattr(content, "uri", ""),
228
+ "media_type": getattr(content, "media_type", None),
229
+ })
230
+
231
+ # URI content (external links to images/files)
232
+ elif content_type == "uri":
233
+ display_contents.append({
234
+ "type": "uri",
235
+ "uri": getattr(content, "uri", ""),
236
+ "media_type": getattr(content, "media_type", None),
237
+ })
238
+
239
+ # Skip function_call, function_result, and other internal content types
240
+
241
+ return display_contents
242
+
243
async def serialize_thread(self, thread_id: str) -> dict[str, Any] | None:
    """Serialize thread state for persistence."""
    thread = self.get_thread(thread_id)
    if not thread:
        return None

    try:
        # AgentThread serializes itself; we only attach ownership metadata.
        state = await thread.serialize()
        state["metadata"] = {
            "agent_id": self.get_agent_for_thread(thread_id),
            "thread_id": thread_id,
        }
        return state

    except Exception as e:
        logger.error(f"Error serializing thread {thread_id}: {e}")
        return None
262
+
263
async def deserialize_thread(self, thread_id: str, agent_id: str, serialized_state: dict[str, Any]) -> bool:
    """Deserialize thread state from persistence."""
    try:
        restored = await AgentThread.deserialize(serialized_state)
        # Store the restored thread and register it under its agent.
        self.thread_storage[thread_id] = restored
        self.agent_threads.setdefault(agent_id, []).append(thread_id)
        return True

    except Exception as e:
        logger.error(f"Error deserializing thread {thread_id}: {e}")
        return False
278
+
279
async def discover_entities(self) -> list[EntityInfo]:
    """Discover all available entities.

    Returns:
        List of discovered entities
    """
    # Pure delegation to the discovery component.
    return await self.entity_discovery.discover_entities()
286
+
287
def get_entity_info(self, entity_id: str) -> EntityInfo:
    """Get entity information.

    Args:
        entity_id: Entity identifier

    Returns:
        Entity information

    Raises:
        EntityNotFoundError: If entity is not found
    """
    if (info := self.entity_discovery.get_entity_info(entity_id)) is None:
        raise EntityNotFoundError(f"Entity '{entity_id}' not found")
    return info
303
+
304
async def execute_streaming(self, request: AgentFrameworkRequest) -> AsyncGenerator[Any, None]:
    """Execute request and stream results in OpenAI format.

    Args:
        request: Request to execute

    Yields:
        OpenAI response stream events
    """
    try:
        entity_id = request.get_entity_id()
        if not entity_id:
            logger.error("No entity_id specified in request")
            return

        # Validate entity exists
        if not self.entity_discovery.get_entity_info(entity_id):
            logger.error(f"Entity '{entity_id}' not found")
            return

        # Fan each raw framework event out into zero or more OpenAI events.
        async for raw_event in self.execute_entity(entity_id, request):
            for converted in await self.message_mapper.convert_event(raw_event, request):
                yield converted

    except Exception as e:
        logger.exception(f"Error in streaming execution: {e}")
        # Could yield error event here
333
+
334
async def execute_sync(self, request: AgentFrameworkRequest) -> OpenAIResponse:
    """Execute request synchronously and return complete response.

    Args:
        request: Request to execute

    Returns:
        Final aggregated OpenAI response
    """
    # Drain the stream first, then fold everything into one response object.
    events = []
    async for event in self.execute_streaming(request):
        events.append(event)
    return await self.message_mapper.aggregate_to_response(events, request)
348
+
349
async def execute_entity(self, entity_id: str, request: AgentFrameworkRequest) -> AsyncGenerator[Any, None]:
    """Execute the entity and yield raw Agent Framework events plus trace events.

    Args:
        entity_id: ID of entity to execute
        request: Request to execute

    Yields:
        Raw Agent Framework events and trace events
    """
    try:
        # Resolve both the metadata and the live object for the entity.
        entity_info = self.get_entity_info(entity_id)
        entity_obj = self.entity_discovery.get_entity_object(entity_id)
        if not entity_obj:
            raise EntityNotFoundError(f"Entity object for '{entity_id}' not found")

        logger.info(f"Executing {entity_info.type}: {entity_id}")

        # Extract session_id from request for trace context
        session_id = getattr(request.extra_body, "session_id", None) if request.extra_body else None

        # Use simplified trace capture
        with capture_traces(session_id=session_id, entity_id=entity_id) as trace_collector:
            if entity_info.type == "agent":
                runner = self._execute_agent(entity_obj, request, trace_collector)
            elif entity_info.type == "workflow":
                runner = self._execute_workflow(entity_obj, request, trace_collector)
            else:
                raise ValueError(f"Unsupported entity type: {entity_info.type}")

            async for event in runner:
                yield event

            # Yield any remaining trace events after execution completes
            # NOTE(review): mangled source indentation - assumes this drain
            # runs while the capture context is still open; verify upstream.
            for trace_event in trace_collector.get_pending_events():
                yield trace_event

    except Exception as e:
        logger.exception(f"Error executing entity {entity_id}: {e}")
        # Surface the failure as a terminal error event instead of raising.
        yield {"type": "error", "message": str(e), "entity_id": entity_id}
391
+
392
async def _execute_agent(
    self, agent: Any, request: AgentFrameworkRequest, trace_collector: Any
) -> AsyncGenerator[Any, None]:
    """Execute Agent Framework agent with trace collection and optional thread support.

    Args:
        agent: Agent object to execute
        request: Request to execute
        trace_collector: Trace collector to get events from

    Yields:
        Agent update events and trace events
    """
    try:
        # Convert input to proper ChatMessage or string
        user_message = self._convert_input_to_chat_message(request.input)

        # Get thread if provided in extra_body
        thread = None
        if request.extra_body and hasattr(request.extra_body, "thread_id") and request.extra_body.thread_id:
            thread_id = request.extra_body.thread_id
            thread = self.get_thread(thread_id)
            if thread:
                logger.debug(f"Using existing thread: {thread_id}")
            else:
                logger.warning(f"Thread {thread_id} not found, proceeding without thread")

        if isinstance(user_message, str):
            logger.debug(f"Executing agent with text input: {user_message[:100]}...")
        else:
            logger.debug(f"Executing agent with multimodal ChatMessage: {type(user_message)}")

        # Use Agent Framework's native streaming with optional thread
        stream = agent.run_stream(user_message, thread=thread) if thread else agent.run_stream(user_message)
        async for update in stream:
            # Interleave traces captured so far ahead of the update itself.
            for trace_event in trace_collector.get_pending_events():
                yield trace_event
            yield update

    except Exception as e:
        logger.error(f"Error in agent execution: {e}")
        yield {"type": "error", "message": f"Agent execution error: {e!s}"}
441
+
442
async def _execute_workflow(
    self, workflow: Any, request: AgentFrameworkRequest, trace_collector: Any
) -> AsyncGenerator[Any, None]:
    """Execute Agent Framework workflow with trace collection.

    Args:
        workflow: Workflow object to execute
        request: Request to execute
        trace_collector: Trace collector to get events from

    Yields:
        Workflow events and trace events
    """
    try:
        # Prefer structured data from extra_body over the generic input field.
        input_data: str | list[Any] | dict[str, Any]
        extra = request.extra_body
        if extra and hasattr(extra, "input_data") and extra.input_data:
            input_data = extra.input_data
            logger.debug(f"Using structured input_data from extra_body: {type(input_data)}")
        else:
            input_data = request.input
            logger.debug(f"Using input field as fallback: {type(input_data)}")

        # Parse input based on workflow's expected input type
        parsed_input = await self._parse_workflow_input(workflow, input_data)
        logger.debug(f"Executing workflow with parsed input type: {type(parsed_input)}")

        # Use Agent Framework workflow's native streaming
        async for event in workflow.run_stream(parsed_input):
            # Pending trace events go out before the workflow event itself.
            for trace_event in trace_collector.get_pending_events():
                yield trace_event
            yield event

    except Exception as e:
        logger.error(f"Error in workflow execution: {e}")
        yield {"type": "error", "message": f"Workflow execution error: {e!s}"}
482
+
483
+ def _convert_input_to_chat_message(self, input_data: Any) -> Any:
484
+ """Convert OpenAI Responses API input to Agent Framework ChatMessage or string.
485
+
486
+ Args:
487
+ input_data: OpenAI ResponseInputParam (List[ResponseInputItemParam])
488
+
489
+ Returns:
490
+ ChatMessage for multimodal content, or string for simple text
491
+ """
492
+ # Import Agent Framework types
493
+ try:
494
+ from agent_framework import ChatMessage, DataContent, Role, TextContent
495
+ except ImportError:
496
+ # Fallback to string extraction if Agent Framework not available
497
+ return self._extract_user_message_fallback(input_data)
498
+
499
+ # Handle simple string input (backward compatibility)
500
+ if isinstance(input_data, str):
501
+ return input_data
502
+
503
+ # Handle OpenAI ResponseInputParam (List[ResponseInputItemParam])
504
+ if isinstance(input_data, list):
505
+ return self._convert_openai_input_to_chat_message(input_data, ChatMessage, TextContent, DataContent, Role)
506
+
507
+ # Fallback for other formats
508
+ return self._extract_user_message_fallback(input_data)
509
+
510
def _convert_openai_input_to_chat_message(
    self, input_items: list[Any], ChatMessage: Any, TextContent: Any, DataContent: Any, Role: Any
) -> Any:
    """Convert OpenAI ResponseInputParam to Agent Framework ChatMessage.

    Args:
        input_items: List of OpenAI ResponseInputItemParam objects (dicts or objects)
        ChatMessage: ChatMessage class for creating chat messages
        TextContent: TextContent class for text content
        DataContent: DataContent class for data/media content
        Role: Role enum for message roles

    Returns:
        ChatMessage with converted content
    """
    contents = []

    for item in input_items:
        # Only dict-shaped "message" items are converted; other item types
        # (tool calls, function results, etc.) are intentionally skipped.
        if not isinstance(item, dict) or item.get("type") != "message":
            continue

        message_content = item.get("content", [])

        # Handle both string content and list content
        if isinstance(message_content, str):
            contents.append(TextContent(text=message_content))
        elif isinstance(message_content, list):
            for content_item in message_content:
                if not isinstance(content_item, dict):
                    continue
                content_type = content_item.get("type")

                if content_type == "input_text":
                    contents.append(TextContent(text=content_item.get("text", "")))

                elif content_type == "input_image":
                    image_url = content_item.get("image_url", "")
                    if image_url:
                        # Parse media type from data URL, fallback to image/png
                        media_type = "image/png"
                        if image_url.startswith("data:"):
                            try:
                                # data:image/jpeg;base64,... -> image/jpeg
                                media_type = image_url.split(";")[0].split(":")[1]
                            except (IndexError, AttributeError):
                                logger.warning(
                                    f"Failed to parse media type from data URL: {image_url[:30]}..."
                                )
                                media_type = "image/png"
                        contents.append(DataContent(uri=image_url, media_type=media_type))

                elif content_type == "input_file":
                    file_data = content_item.get("file_data")
                    file_url = content_item.get("file_url")
                    filename = content_item.get("filename", "")

                    # Determine media type from the filename extension.
                    media_type = "application/octet-stream"  # default
                    lower_name = filename.lower() if filename else ""
                    if lower_name.endswith(".pdf"):
                        media_type = "application/pdf"
                    elif lower_name.endswith((".png", ".jpg", ".jpeg", ".gif")):
                        ext = lower_name.rsplit(".", 1)[-1]
                        # BUGFIX: ".jpg" must map to the registered MIME type
                        # image/jpeg, not the invalid "image/jpg".
                        media_type = "image/jpeg" if ext == "jpg" else f"image/{ext}"
                    elif lower_name.endswith((".wav", ".mp3", ".m4a", ".ogg", ".flac", ".aac")):
                        ext = lower_name.rsplit(".", 1)[-1]
                        # m4a containers use the audio/mp4 MIME type.
                        media_type = "audio/mp4" if ext == "m4a" else f"audio/{ext}"

                    # Use file_data or file_url
                    if file_data:
                        # Assume file_data is base64, create data URI
                        data_uri = f"data:{media_type};base64,{file_data}"
                        contents.append(DataContent(uri=data_uri, media_type=media_type))
                    elif file_url:
                        contents.append(DataContent(uri=file_url, media_type=media_type))

    # Never emit an empty message - fall back to empty text content.
    if not contents:
        contents.append(TextContent(text=""))

    chat_message = ChatMessage(role=Role.USER, contents=contents)

    logger.info(f"Created ChatMessage with {len(contents)} contents:")
    for idx, content in enumerate(contents):
        content_type = content.__class__.__name__
        if hasattr(content, "media_type"):
            logger.info(f" [{idx}] {content_type} - media_type: {content.media_type}")
        else:
            logger.info(f" [{idx}] {content_type}")

    return chat_message
618
+
619
+ def _extract_user_message_fallback(self, input_data: Any) -> str:
620
+ """Fallback method to extract user message as string.
621
+
622
+ Args:
623
+ input_data: Input data in various formats
624
+
625
+ Returns:
626
+ Extracted user message string
627
+ """
628
+ if isinstance(input_data, str):
629
+ return input_data
630
+ if isinstance(input_data, dict):
631
+ # Try common field names
632
+ for field in ["message", "text", "input", "content", "query"]:
633
+ if field in input_data:
634
+ return str(input_data[field])
635
+ # Fallback to JSON string
636
+ return json.dumps(input_data)
637
+ return str(input_data)
638
+
639
+ async def _parse_workflow_input(self, workflow: Any, raw_input: Any) -> Any:
640
+ """Parse input based on workflow's expected input type.
641
+
642
+ Args:
643
+ workflow: Workflow object
644
+ raw_input: Raw input data
645
+
646
+ Returns:
647
+ Parsed input appropriate for the workflow
648
+ """
649
+ try:
650
+ # Handle structured input
651
+ if isinstance(raw_input, dict):
652
+ return self._parse_structured_workflow_input(workflow, raw_input)
653
+ return self._parse_raw_workflow_input(workflow, str(raw_input))
654
+
655
+ except Exception as e:
656
+ logger.warning(f"Error parsing workflow input: {e}")
657
+ return raw_input
658
+
659
+ def _get_start_executor_message_types(self, workflow: Any) -> tuple[Any | None, list[Any]]:
660
+ """Return start executor and its declared input types."""
661
+ try:
662
+ start_executor = workflow.get_start_executor()
663
+ except Exception as exc: # pragma: no cover - defensive logging path
664
+ logger.debug(f"Unable to access workflow start executor: {exc}")
665
+ return None, []
666
+
667
+ if not start_executor:
668
+ return None, []
669
+
670
+ message_types: list[Any] = []
671
+
672
+ try:
673
+ input_types = getattr(start_executor, "input_types", None)
674
+ except Exception as exc: # pragma: no cover - defensive logging path
675
+ logger.debug(f"Failed to read executor input_types: {exc}")
676
+ else:
677
+ if input_types:
678
+ message_types = list(input_types)
679
+
680
+ if not message_types and hasattr(start_executor, "_handlers"):
681
+ try:
682
+ handlers = start_executor._handlers
683
+ if isinstance(handlers, dict):
684
+ message_types = list(handlers.keys())
685
+ except Exception as exc: # pragma: no cover - defensive logging path
686
+ logger.debug(f"Failed to read executor handlers: {exc}")
687
+
688
+ return start_executor, message_types
689
+
690
+ def _select_primary_input_type(self, message_types: list[Any]) -> Any | None:
691
+ """Choose the most user-friendly input type for workflow kick-off."""
692
+ if not message_types:
693
+ return None
694
+
695
+ preferred = (str, dict)
696
+
697
+ for candidate in preferred:
698
+ for message_type in message_types:
699
+ if message_type is candidate:
700
+ return candidate
701
+ origin = get_origin(message_type)
702
+ if origin is candidate:
703
+ return candidate
704
+
705
+ return message_types[0]
706
+
707
def _parse_structured_workflow_input(self, workflow: Any, input_data: dict[str, Any]) -> Any:
    """Parse structured input data for workflow execution.

    Args:
        workflow: Workflow object
        input_data: Structured input data

    Returns:
        Parsed input for workflow
    """
    try:
        from ._utils import parse_input_for_type

        # Resolve the start executor and its declared input types.
        start_executor, message_types = self._get_start_executor_message_types(workflow)
        if not start_executor:
            logger.debug("Cannot determine input type for workflow - using raw dict")
            return input_data
        if not message_types:
            logger.debug("No message types found for start executor - using raw dict")
            return input_data

        target_type = self._select_primary_input_type(message_types)
        if target_type is None:
            logger.debug("Could not select primary input type for workflow - using raw dict")
            return input_data

        # Delegate the actual coercion to the shared helper.
        return parse_input_for_type(input_data, target_type)

    except Exception as e:
        logger.warning(f"Error parsing structured workflow input: {e}")
        return input_data
742
+
743
def _parse_raw_workflow_input(self, workflow: Any, raw_input: str) -> Any:
    """Parse raw input string based on workflow's expected input type.

    Args:
        workflow: Workflow object
        raw_input: Raw input string

    Returns:
        Parsed input for workflow
    """
    try:
        from ._utils import parse_input_for_type

        # Resolve the start executor and its declared input types.
        start_executor, message_types = self._get_start_executor_message_types(workflow)
        if not start_executor:
            logger.debug("Cannot determine input type for workflow - using raw string")
            return raw_input
        if not message_types:
            logger.debug("No message types found for start executor - using raw string")
            return raw_input

        target_type = self._select_primary_input_type(message_types)
        if target_type is None:
            logger.debug("Could not select primary input type for workflow - using raw string")
            return raw_input

        # Delegate the actual coercion to the shared helper.
        return parse_input_for_type(raw_input, target_type)

    except Exception as e:
        logger.debug(f"Error parsing workflow input: {e}")
        return raw_input