agent-framework-devui 1.0.0b251016__py3-none-any.whl → 1.0.0b251028__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release.


This version of agent-framework-devui might be problematic; consult the registry's security advisory for more details.

@@ -127,7 +127,7 @@ class EntityDiscovery:
127
127
 
128
128
  # Cache the loaded object
129
129
  self._loaded_objects[entity_id] = entity_obj
130
- logger.info(f"Successfully loaded entity: {entity_id} (type: {enriched_info.type})")
130
+ logger.info(f"Successfully loaded entity: {entity_id} (type: {enriched_info.type})")
131
131
 
132
132
  return entity_obj
133
133
 
@@ -217,7 +217,7 @@ class EntityDiscovery:
217
217
  if entity_info and "lazy_loaded" in entity_info.metadata:
218
218
  entity_info.metadata["lazy_loaded"] = False
219
219
 
220
- logger.info(f"♻️ Entity invalidated: {entity_id} (will reload on next access)")
220
+ logger.info(f"Entity invalidated: {entity_id} (will reload on next access)")
221
221
 
222
222
  def invalidate_all(self) -> None:
223
223
  """Invalidate all cached entities.
@@ -217,6 +217,11 @@ class AgentFrameworkExecutor:
217
217
  Agent update events and trace events
218
218
  """
219
219
  try:
220
+ # Emit agent lifecycle start event
221
+ from .models._openai_custom import AgentStartedEvent
222
+
223
+ yield AgentStartedEvent()
224
+
220
225
  # Convert input to proper ChatMessage or string
221
226
  user_message = self._convert_input_to_chat_message(request.input)
222
227
 
@@ -266,8 +271,19 @@ class AgentFrameworkExecutor:
266
271
  else:
267
272
  raise ValueError("Agent must implement either run() or run_stream() method")
268
273
 
274
+ # Emit agent lifecycle completion event
275
+ from .models._openai_custom import AgentCompletedEvent
276
+
277
+ yield AgentCompletedEvent()
278
+
269
279
  except Exception as e:
270
280
  logger.error(f"Error in agent execution: {e}")
281
+ # Emit agent lifecycle failure event
282
+ from .models._openai_custom import AgentFailedEvent
283
+
284
+ yield AgentFailedEvent(error=e)
285
+
286
+ # Still yield the error for backward compatibility
271
287
  yield {"type": "error", "message": f"Agent execution error: {e!s}"}
272
288
 
273
289
  async def _execute_workflow(
@@ -284,14 +300,9 @@ class AgentFrameworkExecutor:
284
300
  Workflow events and trace events
285
301
  """
286
302
  try:
287
- # Get input data - prefer structured data from extra_body
288
- input_data: str | list[Any] | dict[str, Any]
289
- if request.extra_body and isinstance(request.extra_body, dict) and request.extra_body.get("input_data"):
290
- input_data = request.extra_body.get("input_data") # type: ignore
291
- logger.debug(f"Using structured input_data from extra_body: {type(input_data)}")
292
- else:
293
- input_data = request.input
294
- logger.debug(f"Using input field as fallback: {type(input_data)}")
303
+ # Get input data directly from request.input field
304
+ input_data = request.input
305
+ logger.debug(f"Using input field: {type(input_data)}")
295
306
 
296
307
  # Parse input based on workflow's expected input type
297
308
  parsed_input = await self._parse_workflow_input(workflow, input_data)
@@ -4,17 +4,32 @@
4
4
 
5
5
  import json
6
6
  import logging
7
+ import time
7
8
  import uuid
8
9
  from collections import OrderedDict
9
10
  from collections.abc import Sequence
10
11
  from datetime import datetime
11
12
  from typing import Any, Union
13
+ from uuid import uuid4
14
+
15
+ from openai.types.responses import (
16
+ Response,
17
+ ResponseContentPartAddedEvent,
18
+ ResponseCreatedEvent,
19
+ ResponseError,
20
+ ResponseFailedEvent,
21
+ ResponseInProgressEvent,
22
+ )
12
23
 
13
24
  from .models import (
14
25
  AgentFrameworkRequest,
26
+ CustomResponseOutputItemAddedEvent,
27
+ CustomResponseOutputItemDoneEvent,
28
+ ExecutorActionItem,
15
29
  InputTokensDetails,
16
30
  OpenAIResponse,
17
31
  OutputTokensDetails,
32
+ ResponseCompletedEvent,
18
33
  ResponseErrorEvent,
19
34
  ResponseFunctionCallArgumentsDeltaEvent,
20
35
  ResponseFunctionResultComplete,
@@ -41,6 +56,56 @@ EventType = Union[
41
56
  ]
42
57
 
43
58
 
59
+ def _serialize_content_recursive(value: Any) -> Any:
60
+ """Recursively serialize Agent Framework Content objects to JSON-compatible values.
61
+
62
+ This handles nested Content objects (like TextContent inside FunctionResultContent.result)
63
+ that can't be directly serialized by json.dumps().
64
+
65
+ Args:
66
+ value: Value to serialize (can be Content object, dict, list, primitive, etc.)
67
+
68
+ Returns:
69
+ JSON-serializable version with all Content objects converted to dicts/primitives
70
+ """
71
+ # Handle None and basic JSON-serializable types
72
+ if value is None or isinstance(value, (str, int, float, bool)):
73
+ return value
74
+
75
+ # Check if it's a SerializationMixin (includes all Content types)
76
+ # Content objects have to_dict() method
77
+ if hasattr(value, "to_dict") and callable(getattr(value, "to_dict", None)):
78
+ try:
79
+ return value.to_dict()
80
+ except Exception as e:
81
+ # If to_dict() fails, fall through to other methods
82
+ logger.debug(f"Failed to serialize with to_dict(): {e}")
83
+
84
+ # Handle dictionaries - recursively process values
85
+ if isinstance(value, dict):
86
+ return {key: _serialize_content_recursive(val) for key, val in value.items()}
87
+
88
+ # Handle lists and tuples - recursively process elements
89
+ if isinstance(value, (list, tuple)):
90
+ serialized = [_serialize_content_recursive(item) for item in value]
91
+ # For single-item lists containing text Content, extract just the text
92
+ # This handles the MCP case where result = [TextContent(text="Hello")]
93
+ # and we want output = "Hello" not output = '[{"type": "text", "text": "Hello"}]'
94
+ if len(serialized) == 1 and isinstance(serialized[0], dict) and serialized[0].get("type") == "text":
95
+ return serialized[0].get("text", "")
96
+ return serialized
97
+
98
+ # For other objects with model_dump(), try that
99
+ if hasattr(value, "model_dump") and callable(getattr(value, "model_dump", None)):
100
+ try:
101
+ return value.model_dump()
102
+ except Exception as e:
103
+ logger.debug(f"Failed to serialize with model_dump(): {e}")
104
+
105
+ # Return as-is and let json.dumps handle it (may raise TypeError for non-serializable types)
106
+ return value
107
+
108
+
44
109
  class MessageMapper:
45
110
  """Maps Agent Framework messages/responses to OpenAI format."""
46
111
 
@@ -102,6 +167,12 @@ class MessageMapper:
102
167
  )
103
168
  ]
104
169
 
170
+ # Handle Agent lifecycle events first
171
+ from .models._openai_custom import AgentCompletedEvent, AgentFailedEvent, AgentStartedEvent
172
+
173
+ if isinstance(raw_event, (AgentStartedEvent, AgentCompletedEvent, AgentFailedEvent)):
174
+ return await self._convert_agent_lifecycle_event(raw_event, context)
175
+
105
176
  # Import Agent Framework types for proper isinstance checks
106
177
  try:
107
178
  from agent_framework import AgentRunResponse, AgentRunResponseUpdate, WorkflowEvent
@@ -245,6 +316,7 @@ class MessageMapper:
245
316
  "content_index": 0,
246
317
  "output_index": 0,
247
318
  "request_id": str(request_key), # For usage accumulation
319
+ "request": request, # Store the request for model name access
248
320
  # Track active function calls: {call_id: {name, item_id, args_chunks}}
249
321
  "active_function_calls": {},
250
322
  }
@@ -267,7 +339,7 @@ class MessageMapper:
267
339
  return int(context["sequence_counter"])
268
340
 
269
341
  async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> Sequence[Any]:
270
- """Convert AgentRunResponseUpdate to OpenAI events using comprehensive content mapping.
342
+ """Convert agent text updates to proper content part events.
271
343
 
272
344
  Args:
273
345
  update: Agent run response update
@@ -283,10 +355,60 @@ class MessageMapper:
283
355
  if not hasattr(update, "contents") or not update.contents:
284
356
  return events
285
357
 
358
+ # Check if we're streaming text content
359
+ has_text_content = any(content.__class__.__name__ == "TextContent" for content in update.contents)
360
+
361
+ # If we have text content and haven't created a message yet, create one
362
+ if has_text_content and "current_message_id" not in context:
363
+ message_id = f"msg_{uuid4().hex[:8]}"
364
+ context["current_message_id"] = message_id
365
+ context["output_index"] = context.get("output_index", -1) + 1
366
+
367
+ # Add message output item
368
+ events.append(
369
+ ResponseOutputItemAddedEvent(
370
+ type="response.output_item.added",
371
+ output_index=context["output_index"],
372
+ sequence_number=self._next_sequence(context),
373
+ item=ResponseOutputMessage(
374
+ type="message", id=message_id, role="assistant", content=[], status="in_progress"
375
+ ),
376
+ )
377
+ )
378
+
379
+ # Add content part for text
380
+ context["content_index"] = 0
381
+ events.append(
382
+ ResponseContentPartAddedEvent(
383
+ type="response.content_part.added",
384
+ output_index=context["output_index"],
385
+ content_index=context["content_index"],
386
+ item_id=message_id,
387
+ sequence_number=self._next_sequence(context),
388
+ part=ResponseOutputText(type="output_text", text="", annotations=[]),
389
+ )
390
+ )
391
+
392
+ # Process each content item
286
393
  for content in update.contents:
287
394
  content_type = content.__class__.__name__
288
395
 
289
- if content_type in self.content_mappers:
396
+ # Special handling for TextContent to use proper delta events
397
+ if content_type == "TextContent" and "current_message_id" in context:
398
+ # Stream text content via proper delta events
399
+ events.append(
400
+ ResponseTextDeltaEvent(
401
+ type="response.output_text.delta",
402
+ output_index=context["output_index"],
403
+ content_index=context.get("content_index", 0),
404
+ item_id=context["current_message_id"],
405
+ delta=content.text,
406
+ logprobs=[], # We don't have logprobs from Agent Framework
407
+ sequence_number=self._next_sequence(context),
408
+ )
409
+ )
410
+ elif content_type in self.content_mappers:
411
+ # Use existing mappers for other content types
290
412
  mapped_events = await self.content_mappers[content_type](content, context)
291
413
  if mapped_events is not None: # Handle None returns (e.g., UsageContent)
292
414
  if isinstance(mapped_events, list):
@@ -297,7 +419,9 @@ class MessageMapper:
297
419
  # Graceful fallback for unknown content types
298
420
  events.append(await self._create_unknown_content_event(content, context))
299
421
 
300
- context["content_index"] += 1
422
+ # Don't increment content_index for text deltas within the same part
423
+ if content_type != "TextContent":
424
+ context["content_index"] = context.get("content_index", 0) + 1
301
425
 
302
426
  except Exception as e:
303
427
  logger.warning(f"Error converting agent update: {e}")
@@ -358,8 +482,105 @@ class MessageMapper:
358
482
 
359
483
  return events
360
484
 
485
+ async def _convert_agent_lifecycle_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]:
486
+ """Convert agent lifecycle events to OpenAI response events.
487
+
488
+ Args:
489
+ event: AgentStartedEvent, AgentCompletedEvent, or AgentFailedEvent
490
+ context: Conversion context
491
+
492
+ Returns:
493
+ List of OpenAI response stream events
494
+ """
495
+ from .models._openai_custom import AgentCompletedEvent, AgentFailedEvent, AgentStartedEvent
496
+
497
+ try:
498
+ # Get model name from context (the agent name)
499
+ model_name = context.get("request", {}).model if context.get("request") else "agent"
500
+
501
+ if isinstance(event, AgentStartedEvent):
502
+ execution_id = f"agent_{uuid4().hex[:12]}"
503
+ context["execution_id"] = execution_id
504
+
505
+ # Create Response object
506
+ response_obj = Response(
507
+ id=f"resp_{execution_id}",
508
+ object="response",
509
+ created_at=float(time.time()),
510
+ model=model_name,
511
+ output=[],
512
+ status="in_progress",
513
+ parallel_tool_calls=False,
514
+ tool_choice="none",
515
+ tools=[],
516
+ )
517
+
518
+ # Emit both created and in_progress events
519
+ return [
520
+ ResponseCreatedEvent(
521
+ type="response.created", sequence_number=self._next_sequence(context), response=response_obj
522
+ ),
523
+ ResponseInProgressEvent(
524
+ type="response.in_progress", sequence_number=self._next_sequence(context), response=response_obj
525
+ ),
526
+ ]
527
+
528
+ if isinstance(event, AgentCompletedEvent):
529
+ execution_id = context.get("execution_id", f"agent_{uuid4().hex[:12]}")
530
+
531
+ response_obj = Response(
532
+ id=f"resp_{execution_id}",
533
+ object="response",
534
+ created_at=float(time.time()),
535
+ model=model_name,
536
+ output=[],
537
+ status="completed",
538
+ parallel_tool_calls=False,
539
+ tool_choice="none",
540
+ tools=[],
541
+ )
542
+
543
+ return [
544
+ ResponseCompletedEvent(
545
+ type="response.completed", sequence_number=self._next_sequence(context), response=response_obj
546
+ )
547
+ ]
548
+
549
+ if isinstance(event, AgentFailedEvent):
550
+ execution_id = context.get("execution_id", f"agent_{uuid4().hex[:12]}")
551
+
552
+ # Create error object
553
+ response_error = ResponseError(
554
+ message=str(event.error) if event.error else "Unknown error", code="server_error"
555
+ )
556
+
557
+ response_obj = Response(
558
+ id=f"resp_{execution_id}",
559
+ object="response",
560
+ created_at=float(time.time()),
561
+ model=model_name,
562
+ output=[],
563
+ status="failed",
564
+ error=response_error,
565
+ parallel_tool_calls=False,
566
+ tool_choice="none",
567
+ tools=[],
568
+ )
569
+
570
+ return [
571
+ ResponseFailedEvent(
572
+ type="response.failed", sequence_number=self._next_sequence(context), response=response_obj
573
+ )
574
+ ]
575
+
576
+ return []
577
+
578
+ except Exception as e:
579
+ logger.warning(f"Error converting agent lifecycle event: {e}")
580
+ return [await self._create_error_event(str(e), context)]
581
+
361
582
  async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]:
362
- """Convert workflow event to structured OpenAI events.
583
+ """Convert workflow events to standard OpenAI event objects.
363
584
 
364
585
  Args:
365
586
  event: Workflow event
@@ -369,22 +590,247 @@ class MessageMapper:
369
590
  List of OpenAI response stream events
370
591
  """
371
592
  try:
593
+ event_class = event.__class__.__name__
594
+
595
+ # Response-level events - construct proper OpenAI objects
596
+ if event_class == "WorkflowStartedEvent":
597
+ workflow_id = getattr(event, "workflow_id", str(uuid4()))
598
+ context["workflow_id"] = workflow_id
599
+
600
+ # Import Response type for proper construction
601
+ from openai.types.responses import Response
602
+
603
+ # Return proper OpenAI event objects
604
+ events: list[Any] = []
605
+
606
+ # Determine the model name - use request model or default to "workflow"
607
+ # The request model will be the agent name for agents, workflow name for workflows
608
+ model_name = context.get("request", {}).model if context.get("request") else "workflow"
609
+
610
+ # Create a full Response object with all required fields
611
+ response_obj = Response(
612
+ id=f"resp_{workflow_id}",
613
+ object="response",
614
+ created_at=float(time.time()),
615
+ model=model_name, # Use the actual model/agent name
616
+ output=[], # Empty output list initially
617
+ status="in_progress",
618
+ # Required fields with safe defaults
619
+ parallel_tool_calls=False,
620
+ tool_choice="none",
621
+ tools=[],
622
+ )
623
+
624
+ # First emit response.created
625
+ events.append(
626
+ ResponseCreatedEvent(
627
+ type="response.created", sequence_number=self._next_sequence(context), response=response_obj
628
+ )
629
+ )
630
+
631
+ # Then emit response.in_progress (reuse same response object)
632
+ events.append(
633
+ ResponseInProgressEvent(
634
+ type="response.in_progress", sequence_number=self._next_sequence(context), response=response_obj
635
+ )
636
+ )
637
+
638
+ return events
639
+
640
+ if event_class in ["WorkflowCompletedEvent", "WorkflowOutputEvent"]:
641
+ workflow_id = context.get("workflow_id", str(uuid4()))
642
+
643
+ # Import Response type for proper construction
644
+ from openai.types.responses import Response
645
+
646
+ # Get model name from context
647
+ model_name = context.get("request", {}).model if context.get("request") else "workflow"
648
+
649
+ # Create a full Response object for completed state
650
+ response_obj = Response(
651
+ id=f"resp_{workflow_id}",
652
+ object="response",
653
+ created_at=float(time.time()),
654
+ model=model_name,
655
+ output=[], # Output should be populated by this point from text streaming
656
+ status="completed",
657
+ parallel_tool_calls=False,
658
+ tool_choice="none",
659
+ tools=[],
660
+ )
661
+
662
+ return [
663
+ ResponseCompletedEvent(
664
+ type="response.completed", sequence_number=self._next_sequence(context), response=response_obj
665
+ )
666
+ ]
667
+
668
+ if event_class == "WorkflowFailedEvent":
669
+ workflow_id = context.get("workflow_id", str(uuid4()))
670
+ error_info = getattr(event, "error", None)
671
+
672
+ # Import Response and ResponseError types
673
+ from openai.types.responses import Response, ResponseError
674
+
675
+ # Get model name from context
676
+ model_name = context.get("request", {}).model if context.get("request") else "workflow"
677
+
678
+ # Create error object
679
+ error_message = str(error_info) if error_info else "Unknown error"
680
+
681
+ # Create ResponseError object (code must be one of the allowed values)
682
+ response_error = ResponseError(
683
+ message=error_message,
684
+ code="server_error", # Use generic server_error code for workflow failures
685
+ )
686
+
687
+ # Create a full Response object for failed state
688
+ response_obj = Response(
689
+ id=f"resp_{workflow_id}",
690
+ object="response",
691
+ created_at=float(time.time()),
692
+ model=model_name,
693
+ output=[],
694
+ status="failed",
695
+ error=response_error,
696
+ parallel_tool_calls=False,
697
+ tool_choice="none",
698
+ tools=[],
699
+ )
700
+
701
+ return [
702
+ ResponseFailedEvent(
703
+ type="response.failed", sequence_number=self._next_sequence(context), response=response_obj
704
+ )
705
+ ]
706
+
707
+ # Executor-level events (output items)
708
+ if event_class == "ExecutorInvokedEvent":
709
+ executor_id = getattr(event, "executor_id", "unknown")
710
+ item_id = f"exec_{executor_id}_{uuid4().hex[:8]}"
711
+ context[f"exec_item_{executor_id}"] = item_id
712
+ context["output_index"] = context.get("output_index", -1) + 1
713
+
714
+ # Create ExecutorActionItem with proper type
715
+ executor_item = ExecutorActionItem(
716
+ type="executor_action",
717
+ id=item_id,
718
+ executor_id=executor_id,
719
+ status="in_progress",
720
+ metadata=getattr(event, "metadata", {}),
721
+ )
722
+
723
+ # Use our custom event type that accepts ExecutorActionItem
724
+ return [
725
+ CustomResponseOutputItemAddedEvent(
726
+ type="response.output_item.added",
727
+ output_index=context["output_index"],
728
+ sequence_number=self._next_sequence(context),
729
+ item=executor_item,
730
+ )
731
+ ]
732
+
733
+ if event_class == "ExecutorCompletedEvent":
734
+ executor_id = getattr(event, "executor_id", "unknown")
735
+ item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown")
736
+
737
+ # Create ExecutorActionItem with completed status
738
+ # ExecutorCompletedEvent uses 'data' field, not 'result'
739
+ executor_item = ExecutorActionItem(
740
+ type="executor_action",
741
+ id=item_id,
742
+ executor_id=executor_id,
743
+ status="completed",
744
+ result=getattr(event, "data", None),
745
+ )
746
+
747
+ # Use our custom event type
748
+ return [
749
+ CustomResponseOutputItemDoneEvent(
750
+ type="response.output_item.done",
751
+ output_index=context.get("output_index", 0),
752
+ sequence_number=self._next_sequence(context),
753
+ item=executor_item,
754
+ )
755
+ ]
756
+
757
+ if event_class == "ExecutorFailedEvent":
758
+ executor_id = getattr(event, "executor_id", "unknown")
759
+ item_id = context.get(f"exec_item_{executor_id}", f"exec_{executor_id}_unknown")
760
+ error_info = getattr(event, "error", None)
761
+
762
+ # Create ExecutorActionItem with failed status
763
+ executor_item = ExecutorActionItem(
764
+ type="executor_action",
765
+ id=item_id,
766
+ executor_id=executor_id,
767
+ status="failed",
768
+ error={"message": str(error_info)} if error_info else None,
769
+ )
770
+
771
+ # Use our custom event type
772
+ return [
773
+ CustomResponseOutputItemDoneEvent(
774
+ type="response.output_item.done",
775
+ output_index=context.get("output_index", 0),
776
+ sequence_number=self._next_sequence(context),
777
+ item=executor_item,
778
+ )
779
+ ]
780
+
781
+ # Handle informational workflow events (status, warnings, errors)
782
+ if event_class in ["WorkflowStatusEvent", "WorkflowWarningEvent", "WorkflowErrorEvent", "RequestInfoEvent"]:
783
+ # These are informational events that don't map to OpenAI lifecycle events
784
+ # Convert them to trace events for debugging visibility
785
+ event_data: dict[str, Any] = {}
786
+
787
+ # Extract relevant data based on event type
788
+ if event_class == "WorkflowStatusEvent":
789
+ event_data["state"] = str(getattr(event, "state", "unknown"))
790
+ elif event_class == "WorkflowWarningEvent":
791
+ event_data["message"] = str(getattr(event, "message", ""))
792
+ elif event_class == "WorkflowErrorEvent":
793
+ event_data["message"] = str(getattr(event, "message", ""))
794
+ event_data["error"] = str(getattr(event, "error", ""))
795
+ elif event_class == "RequestInfoEvent":
796
+ request_info = getattr(event, "data", {})
797
+ event_data["request_info"] = request_info if isinstance(request_info, dict) else str(request_info)
798
+
799
+ # Create a trace event for debugging
800
+ trace_event = ResponseTraceEventComplete(
801
+ type="response.trace.complete",
802
+ data={
803
+ "trace_type": "workflow_info",
804
+ "event_type": event_class,
805
+ "data": event_data,
806
+ "timestamp": datetime.now().isoformat(),
807
+ },
808
+ span_id=f"workflow_info_{uuid4().hex[:8]}",
809
+ item_id=context["item_id"],
810
+ output_index=context.get("output_index", 0),
811
+ sequence_number=self._next_sequence(context),
812
+ )
813
+
814
+ return [trace_event]
815
+
816
+ # For unknown/legacy events, still emit as workflow event for backward compatibility
372
817
  # Get event data and serialize if it's a SerializationMixin
373
- event_data = getattr(event, "data", None)
374
- if event_data is not None and hasattr(event_data, "to_dict"):
818
+ raw_event_data = getattr(event, "data", None)
819
+ serialized_event_data: dict[str, Any] | str | None = raw_event_data
820
+ if raw_event_data is not None and hasattr(raw_event_data, "to_dict"):
375
821
  # SerializationMixin objects - convert to dict for JSON serialization
376
822
  try:
377
- event_data = event_data.to_dict()
823
+ serialized_event_data = raw_event_data.to_dict()
378
824
  except Exception as e:
379
825
  logger.debug(f"Failed to serialize event data with to_dict(): {e}")
380
- event_data = str(event_data)
826
+ serialized_event_data = str(raw_event_data)
381
827
 
382
- # Create structured workflow event
828
+ # Create structured workflow event (keeping for backward compatibility)
383
829
  workflow_event = ResponseWorkflowEventComplete(
384
830
  type="response.workflow_event.complete",
385
831
  data={
386
832
  "event_type": event.__class__.__name__,
387
- "data": event_data,
833
+ "data": serialized_event_data,
388
834
  "executor_id": getattr(event, "executor_id", None),
389
835
  "timestamp": datetime.now().isoformat(),
390
836
  },
@@ -394,6 +840,7 @@ class MessageMapper:
394
840
  sequence_number=self._next_sequence(context),
395
841
  )
396
842
 
843
+ logger.debug(f"Unhandled workflow event type: {event_class}, emitting as legacy workflow event")
397
844
  return [workflow_event]
398
845
 
399
846
  except Exception as e:
@@ -538,8 +985,16 @@ class MessageMapper:
538
985
  result = getattr(content, "result", None)
539
986
  exception = getattr(content, "exception", None)
540
987
 
541
- # Convert result to string
542
- output = result if isinstance(result, str) else json.dumps(result) if result is not None else ""
988
+ # Convert result to string, handling nested Content objects from MCP tools
989
+ if isinstance(result, str):
990
+ output = result
991
+ elif result is not None:
992
+ # Recursively serialize any nested Content objects (e.g., from MCP tools)
993
+ serialized = _serialize_content_recursive(result)
994
+ # Convert to JSON string if still not a string
995
+ output = serialized if isinstance(serialized, str) else json.dumps(serialized)
996
+ else:
997
+ output = ""
543
998
 
544
999
  # Determine status based on exception
545
1000
  status = "incomplete" if exception else "completed"
@@ -556,6 +1011,7 @@ class MessageMapper:
556
1011
  item_id=item_id,
557
1012
  output_index=context["output_index"],
558
1013
  sequence_number=self._next_sequence(context),
1014
+ timestamp=datetime.now().isoformat(),
559
1015
  )
560
1016
 
561
1017
  async def _map_error_content(self, content: Any, context: dict[str, Any]) -> ResponseErrorEvent:
@@ -723,7 +1179,7 @@ class MessageMapper:
723
1179
  async def _create_unknown_content_event(self, content: Any, context: dict[str, Any]) -> ResponseStreamEvent:
724
1180
  """Create event for unknown content types."""
725
1181
  content_type = content.__class__.__name__
726
- text = f"⚠️ Unknown content type: {content_type}\n"
1182
+ text = f"Warning: Unknown content type: {content_type}\n"
727
1183
  return self._create_text_delta_event(text, context)
728
1184
 
729
1185
  async def _create_error_response(self, error_message: str, request: AgentFrameworkRequest) -> OpenAIResponse: