agent-framework-devui 1.0.0b251001__py3-none-any.whl → 1.0.0b251016__py3-none-any.whl

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.

This release has been flagged as potentially problematic.


This version of agent-framework-devui might be problematic; see the registry's advisory page for more details.

@@ -5,8 +5,8 @@
5
5
  import json
6
6
  import logging
7
7
  import uuid
8
+ from collections import OrderedDict
8
9
  from collections.abc import Sequence
9
- from dataclasses import asdict, is_dataclass
10
10
  from datetime import datetime
11
11
  from typing import Any, Union
12
12
 
@@ -18,6 +18,8 @@ from .models import (
18
18
  ResponseErrorEvent,
19
19
  ResponseFunctionCallArgumentsDeltaEvent,
20
20
  ResponseFunctionResultComplete,
21
+ ResponseFunctionToolCall,
22
+ ResponseOutputItemAddedEvent,
21
23
  ResponseOutputMessage,
22
24
  ResponseOutputText,
23
25
  ResponseReasoningTextDeltaEvent,
@@ -25,7 +27,6 @@ from .models import (
25
27
  ResponseTextDeltaEvent,
26
28
  ResponseTraceEventComplete,
27
29
  ResponseUsage,
28
- ResponseUsageEventComplete,
29
30
  ResponseWorkflowEventComplete,
30
31
  )
31
32
 
@@ -35,19 +36,26 @@ logger = logging.getLogger(__name__)
35
36
  EventType = Union[
36
37
  ResponseStreamEvent,
37
38
  ResponseWorkflowEventComplete,
38
- ResponseFunctionResultComplete,
39
+ ResponseOutputItemAddedEvent,
39
40
  ResponseTraceEventComplete,
40
- ResponseUsageEventComplete,
41
41
  ]
42
42
 
43
43
 
44
44
  class MessageMapper:
45
45
  """Maps Agent Framework messages/responses to OpenAI format."""
46
46
 
47
- def __init__(self) -> None:
48
- """Initialize Agent Framework message mapper."""
47
+ def __init__(self, max_contexts: int = 1000) -> None:
48
+ """Initialize Agent Framework message mapper.
49
+
50
+ Args:
51
+ max_contexts: Maximum number of contexts to keep in memory (default: 1000)
52
+ """
49
53
  self.sequence_counter = 0
50
- self._conversion_contexts: dict[int, dict[str, Any]] = {}
54
+ self._conversion_contexts: OrderedDict[int, dict[str, Any]] = OrderedDict()
55
+ self._max_contexts = max_contexts
56
+
57
+ # Track usage per request for final Response.usage (OpenAI standard)
58
+ self._usage_accumulator: dict[str, dict[str, int]] = {}
51
59
 
52
60
  # Register content type mappers for all 12 Agent Framework content types
53
61
  self.content_mappers = {
@@ -96,9 +104,23 @@ class MessageMapper:
96
104
 
97
105
  # Import Agent Framework types for proper isinstance checks
98
106
  try:
99
- from agent_framework import AgentRunResponseUpdate, WorkflowEvent
107
+ from agent_framework import AgentRunResponse, AgentRunResponseUpdate, WorkflowEvent
108
+ from agent_framework._workflows._events import AgentRunUpdateEvent
109
+
110
+ # Handle AgentRunUpdateEvent - workflow event wrapping AgentRunResponseUpdate
111
+ # This must be checked BEFORE generic WorkflowEvent check
112
+ if isinstance(raw_event, AgentRunUpdateEvent):
113
+ # Extract the AgentRunResponseUpdate from the event's data attribute
114
+ if raw_event.data and isinstance(raw_event.data, AgentRunResponseUpdate):
115
+ return await self._convert_agent_update(raw_event.data, context)
116
+ # If no data, treat as generic workflow event
117
+ return await self._convert_workflow_event(raw_event, context)
118
+
119
+ # Handle complete agent response (AgentRunResponse) - for non-streaming agent execution
120
+ if isinstance(raw_event, AgentRunResponse):
121
+ return await self._convert_agent_response(raw_event, context)
100
122
 
101
- # Handle agent updates (AgentRunResponseUpdate)
123
+ # Handle agent updates (AgentRunResponseUpdate) - for direct agent execution
102
124
  if isinstance(raw_event, AgentRunResponseUpdate):
103
125
  return await self._convert_agent_update(raw_event, context)
104
126
 
@@ -150,17 +172,31 @@ class MessageMapper:
150
172
  status="completed",
151
173
  )
152
174
 
153
- # Create usage object
154
- input_token_count = len(str(request.input)) // 4 if request.input else 0
155
- output_token_count = len(full_content) // 4
156
-
157
- usage = ResponseUsage(
158
- input_tokens=input_token_count,
159
- output_tokens=output_token_count,
160
- total_tokens=input_token_count + output_token_count,
161
- input_tokens_details=InputTokensDetails(cached_tokens=0),
162
- output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
163
- )
175
+ # Get usage from accumulator (OpenAI standard)
176
+ request_id = str(id(request))
177
+ usage_data = self._usage_accumulator.get(request_id)
178
+
179
+ if usage_data:
180
+ usage = ResponseUsage(
181
+ input_tokens=usage_data["input_tokens"],
182
+ output_tokens=usage_data["output_tokens"],
183
+ total_tokens=usage_data["total_tokens"],
184
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
185
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
186
+ )
187
+ # Cleanup accumulator
188
+ del self._usage_accumulator[request_id]
189
+ else:
190
+ # Fallback: estimate if no usage was tracked
191
+ input_token_count = len(str(request.input)) // 4 if request.input else 0
192
+ output_token_count = len(full_content) // 4
193
+ usage = ResponseUsage(
194
+ input_tokens=input_token_count,
195
+ output_tokens=output_token_count,
196
+ total_tokens=input_token_count + output_token_count,
197
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
198
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
199
+ )
164
200
 
165
201
  return OpenAIResponse(
166
202
  id=f"resp_{uuid.uuid4().hex[:12]}",
@@ -177,10 +213,18 @@ class MessageMapper:
177
213
  except Exception as e:
178
214
  logger.exception(f"Error aggregating response: {e}")
179
215
  return await self._create_error_response(str(e), request)
216
+ finally:
217
+ # Cleanup: Remove context after aggregation to prevent memory leak
218
+ # This handles the common case where streaming completes successfully
219
+ request_key = id(request)
220
+ if self._conversion_contexts.pop(request_key, None):
221
+ logger.debug(f"Cleaned up context for request {request_key} after aggregation")
180
222
 
181
223
  def _get_or_create_context(self, request: AgentFrameworkRequest) -> dict[str, Any]:
182
224
  """Get or create conversion context for this request.
183
225
 
226
+ Uses LRU eviction when max_contexts is reached to prevent unbounded memory growth.
227
+
184
228
  Args:
185
229
  request: Request to get context for
186
230
 
@@ -188,13 +232,26 @@ class MessageMapper:
188
232
  Conversion context dictionary
189
233
  """
190
234
  request_key = id(request)
235
+
191
236
  if request_key not in self._conversion_contexts:
237
+ # Evict oldest context if at capacity (LRU eviction)
238
+ if len(self._conversion_contexts) >= self._max_contexts:
239
+ evicted_key, _ = self._conversion_contexts.popitem(last=False)
240
+ logger.debug(f"Evicted oldest context (key={evicted_key}) - at max capacity ({self._max_contexts})")
241
+
192
242
  self._conversion_contexts[request_key] = {
193
243
  "sequence_counter": 0,
194
244
  "item_id": f"msg_{uuid.uuid4().hex[:8]}",
195
245
  "content_index": 0,
196
246
  "output_index": 0,
247
+ "request_id": str(request_key), # For usage accumulation
248
+ # Track active function calls: {call_id: {name, item_id, args_chunks}}
249
+ "active_function_calls": {},
197
250
  }
251
+ else:
252
+ # Move to end (mark as recently used for LRU)
253
+ self._conversion_contexts.move_to_end(request_key)
254
+
198
255
  return self._conversion_contexts[request_key]
199
256
 
200
257
  def _next_sequence(self, context: dict[str, Any]) -> int:
@@ -231,10 +288,11 @@ class MessageMapper:
231
288
 
232
289
  if content_type in self.content_mappers:
233
290
  mapped_events = await self.content_mappers[content_type](content, context)
234
- if isinstance(mapped_events, list):
235
- events.extend(mapped_events)
236
- else:
237
- events.append(mapped_events)
291
+ if mapped_events is not None: # Handle None returns (e.g., UsageContent)
292
+ if isinstance(mapped_events, list):
293
+ events.extend(mapped_events)
294
+ else:
295
+ events.append(mapped_events)
238
296
  else:
239
297
  # Graceful fallback for unknown content types
240
298
  events.append(await self._create_unknown_content_event(content, context))
@@ -247,6 +305,59 @@ class MessageMapper:
247
305
 
248
306
  return events
249
307
 
308
+ async def _convert_agent_response(self, response: Any, context: dict[str, Any]) -> Sequence[Any]:
309
+ """Convert complete AgentRunResponse to OpenAI events.
310
+
311
+ This handles non-streaming agent execution where agent.run() returns
312
+ a complete AgentRunResponse instead of streaming AgentRunResponseUpdate objects.
313
+
314
+ Args:
315
+ response: Agent run response (AgentRunResponse)
316
+ context: Conversion context
317
+
318
+ Returns:
319
+ List of OpenAI response stream events
320
+ """
321
+ events: list[Any] = []
322
+
323
+ try:
324
+ # Extract all messages from the response
325
+ messages = getattr(response, "messages", [])
326
+
327
+ # Convert each message's contents to streaming events
328
+ for message in messages:
329
+ if hasattr(message, "contents") and message.contents:
330
+ for content in message.contents:
331
+ content_type = content.__class__.__name__
332
+
333
+ if content_type in self.content_mappers:
334
+ mapped_events = await self.content_mappers[content_type](content, context)
335
+ if mapped_events is not None: # Handle None returns (e.g., UsageContent)
336
+ if isinstance(mapped_events, list):
337
+ events.extend(mapped_events)
338
+ else:
339
+ events.append(mapped_events)
340
+ else:
341
+ # Graceful fallback for unknown content types
342
+ events.append(await self._create_unknown_content_event(content, context))
343
+
344
+ context["content_index"] += 1
345
+
346
+ # Add usage information if present
347
+ usage_details = getattr(response, "usage_details", None)
348
+ if usage_details:
349
+ from agent_framework import UsageContent
350
+
351
+ usage_content = UsageContent(details=usage_details)
352
+ await self._map_usage_content(usage_content, context)
353
+ # Note: _map_usage_content returns None - it accumulates usage for final Response.usage
354
+
355
+ except Exception as e:
356
+ logger.warning(f"Error converting agent response: {e}")
357
+ events.append(await self._create_error_event(str(e), context))
358
+
359
+ return events
360
+
250
361
  async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]:
251
362
  """Convert workflow event to structured OpenAI events.
252
363
 
@@ -258,13 +369,22 @@ class MessageMapper:
258
369
  List of OpenAI response stream events
259
370
  """
260
371
  try:
261
- serialized_payload = self._serialize_payload(getattr(event, "data", None))
372
+ # Get event data and serialize if it's a SerializationMixin
373
+ event_data = getattr(event, "data", None)
374
+ if event_data is not None and hasattr(event_data, "to_dict"):
375
+ # SerializationMixin objects - convert to dict for JSON serialization
376
+ try:
377
+ event_data = event_data.to_dict()
378
+ except Exception as e:
379
+ logger.debug(f"Failed to serialize event data with to_dict(): {e}")
380
+ event_data = str(event_data)
381
+
262
382
  # Create structured workflow event
263
383
  workflow_event = ResponseWorkflowEventComplete(
264
384
  type="response.workflow_event.complete",
265
385
  data={
266
386
  "event_type": event.__class__.__name__,
267
- "data": serialized_payload,
387
+ "data": event_data,
268
388
  "executor_id": getattr(event, "executor_id", None),
269
389
  "timestamp": datetime.now().isoformat(),
270
390
  },
@@ -280,59 +400,6 @@ class MessageMapper:
280
400
  logger.warning(f"Error converting workflow event: {e}")
281
401
  return [await self._create_error_event(str(e), context)]
282
402
 
283
- def _serialize_payload(self, value: Any) -> Any:
284
- """Best-effort JSON serialization for workflow payloads."""
285
- if value is None:
286
- return None
287
- if isinstance(value, (str, int, float, bool)):
288
- return value
289
- if isinstance(value, (list, tuple, set)):
290
- return [self._serialize_payload(item) for item in value]
291
- if isinstance(value, dict):
292
- return {str(k): self._serialize_payload(v) for k, v in value.items()}
293
- if is_dataclass(value) and not isinstance(value, type):
294
- try:
295
- return self._serialize_payload(asdict(value))
296
- except Exception as exc:
297
- logger.debug("Failed to serialize dataclass payload: %s", exc)
298
- model_dump_method = getattr(value, "model_dump", None)
299
- if model_dump_method is not None and callable(model_dump_method):
300
- try:
301
- dumped = model_dump_method()
302
- return self._serialize_payload(dumped)
303
- except Exception as exc:
304
- logger.debug("Failed to serialize payload via model_dump: %s", exc)
305
- dict_method = getattr(value, "dict", None)
306
- if dict_method is not None and callable(dict_method):
307
- try:
308
- dict_result = dict_method()
309
- return self._serialize_payload(dict_result)
310
- except Exception as exc:
311
- logger.debug("Failed to serialize payload via dict(): %s", exc)
312
- to_dict_method = getattr(value, "to_dict", None)
313
- if to_dict_method is not None and callable(to_dict_method):
314
- try:
315
- to_dict_result = to_dict_method()
316
- return self._serialize_payload(to_dict_result)
317
- except Exception as exc:
318
- logger.debug("Failed to serialize payload via to_dict(): %s", exc)
319
- model_dump_json_method = getattr(value, "model_dump_json", None)
320
- if model_dump_json_method is not None and callable(model_dump_json_method):
321
- try:
322
- json_str = model_dump_json_method()
323
- if isinstance(json_str, (str, bytes, bytearray)):
324
- return json.loads(json_str)
325
- except Exception as exc:
326
- logger.debug("Failed to serialize payload via model_dump_json: %s", exc)
327
- if hasattr(value, "__dict__"):
328
- try:
329
- return self._serialize_payload({
330
- key: self._serialize_payload(val) for key, val in value.__dict__.items() if not key.startswith("_")
331
- })
332
- except Exception as exc:
333
- logger.debug("Failed to serialize payload via __dict__: %s", exc)
334
- return str(value)
335
-
336
403
  # Content type mappers - implementing our comprehensive mapping plan
337
404
 
338
405
  async def _map_text_content(self, content: Any, context: dict[str, Any]) -> ResponseTextDeltaEvent:
@@ -352,42 +419,141 @@ class MessageMapper:
352
419
 
353
420
  async def _map_function_call_content(
354
421
  self, content: Any, context: dict[str, Any]
355
- ) -> list[ResponseFunctionCallArgumentsDeltaEvent]:
356
- """Map FunctionCallContent to ResponseFunctionCallArgumentsDeltaEvent(s)."""
357
- events = []
422
+ ) -> list[ResponseFunctionCallArgumentsDeltaEvent | ResponseOutputItemAddedEvent]:
423
+ """Map FunctionCallContent to OpenAI events following Responses API spec.
424
+
425
+ Agent Framework emits FunctionCallContent in two patterns:
426
+ 1. First event: call_id + name + empty/no arguments
427
+ 2. Subsequent events: empty call_id/name + argument chunks
358
428
 
359
- # For streaming, need to chunk the arguments JSON
360
- args_str = json.dumps(content.arguments) if hasattr(content, "arguments") and content.arguments else "{}"
429
+ We emit:
430
+ 1. response.output_item.added (with full metadata) for the first event
431
+ 2. response.function_call_arguments.delta (referencing item_id) for chunks
432
+ """
433
+ events: list[ResponseFunctionCallArgumentsDeltaEvent | ResponseOutputItemAddedEvent] = []
434
+
435
+ # CASE 1: New function call (has call_id and name)
436
+ # This is the first event that establishes the function call
437
+ if content.call_id and content.name:
438
+ # Use call_id as item_id (simpler, and call_id uniquely identifies the call)
439
+ item_id = content.call_id
440
+
441
+ # Track this function call for later argument deltas
442
+ context["active_function_calls"][content.call_id] = {
443
+ "item_id": item_id,
444
+ "name": content.name,
445
+ "arguments_chunks": [],
446
+ }
447
+
448
+ logger.debug(f"New function call: {content.name} (call_id={content.call_id})")
361
449
 
362
- # Chunk the JSON string for streaming
363
- for chunk in self._chunk_json_string(args_str):
450
+ # Emit response.output_item.added event per OpenAI spec
364
451
  events.append(
365
- ResponseFunctionCallArgumentsDeltaEvent(
366
- type="response.function_call_arguments.delta",
367
- delta=chunk,
368
- item_id=context["item_id"],
452
+ ResponseOutputItemAddedEvent(
453
+ type="response.output_item.added",
454
+ item=ResponseFunctionToolCall(
455
+ id=content.call_id, # Use call_id as the item id
456
+ call_id=content.call_id,
457
+ name=content.name,
458
+ arguments="", # Empty initially, will be filled by deltas
459
+ type="function_call",
460
+ status="in_progress",
461
+ ),
369
462
  output_index=context["output_index"],
370
463
  sequence_number=self._next_sequence(context),
371
464
  )
372
465
  )
373
466
 
467
+ # CASE 2: Argument deltas (content has arguments, possibly without call_id/name)
468
+ if content.arguments:
469
+ # Find the active function call for these arguments
470
+ active_call = self._get_active_function_call(content, context)
471
+
472
+ if active_call:
473
+ item_id = active_call["item_id"]
474
+
475
+ # Convert arguments to string if it's a dict (Agent Framework may send either)
476
+ delta_str = content.arguments if isinstance(content.arguments, str) else json.dumps(content.arguments)
477
+
478
+ # Emit argument delta referencing the item_id
479
+ events.append(
480
+ ResponseFunctionCallArgumentsDeltaEvent(
481
+ type="response.function_call_arguments.delta",
482
+ delta=delta_str,
483
+ item_id=item_id,
484
+ output_index=context["output_index"],
485
+ sequence_number=self._next_sequence(context),
486
+ )
487
+ )
488
+
489
+ # Track chunk for debugging
490
+ active_call["arguments_chunks"].append(delta_str)
491
+ else:
492
+ logger.warning(f"Received function call arguments without active call: {content.arguments[:50]}...")
493
+
374
494
  return events
375
495
 
496
+ def _get_active_function_call(self, content: Any, context: dict[str, Any]) -> dict[str, Any] | None:
497
+ """Find the active function call for this content.
498
+
499
+ Uses call_id if present, otherwise falls back to most recent call.
500
+ Necessary because Agent Framework may send argument chunks without call_id.
501
+
502
+ Args:
503
+ content: FunctionCallContent with possible call_id
504
+ context: Conversion context with active_function_calls
505
+
506
+ Returns:
507
+ Active call dict or None
508
+ """
509
+ active_calls: dict[str, dict[str, Any]] = context["active_function_calls"]
510
+
511
+ # If content has call_id, use it to find the exact call
512
+ if hasattr(content, "call_id") and content.call_id:
513
+ result = active_calls.get(content.call_id)
514
+ return result if result is not None else None
515
+
516
+ # Otherwise, use the most recent call (last one added)
517
+ # This handles the case where Agent Framework sends argument chunks
518
+ # without call_id in subsequent events
519
+ if active_calls:
520
+ return list(active_calls.values())[-1]
521
+
522
+ return None
523
+
376
524
  async def _map_function_result_content(
377
525
  self, content: Any, context: dict[str, Any]
378
526
  ) -> ResponseFunctionResultComplete:
379
- """Map FunctionResultContent to structured event."""
527
+ """Map FunctionResultContent to DevUI custom event.
528
+
529
+ DevUI extension: The OpenAI Responses API doesn't stream function execution results
530
+ (in OpenAI's model, the application executes functions, not the API).
531
+ """
532
+ # Get call_id from content
533
+ call_id = getattr(content, "call_id", None)
534
+ if not call_id:
535
+ call_id = f"call_{uuid.uuid4().hex[:8]}"
536
+
537
+ # Extract result
538
+ result = getattr(content, "result", None)
539
+ exception = getattr(content, "exception", None)
540
+
541
+ # Convert result to string
542
+ output = result if isinstance(result, str) else json.dumps(result) if result is not None else ""
543
+
544
+ # Determine status based on exception
545
+ status = "incomplete" if exception else "completed"
546
+
547
+ # Generate item_id
548
+ item_id = f"item_{uuid.uuid4().hex[:8]}"
549
+
550
+ # Return DevUI custom event
380
551
  return ResponseFunctionResultComplete(
381
552
  type="response.function_result.complete",
382
- data={
383
- "call_id": getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
384
- "result": getattr(content, "result", None),
385
- "status": "completed" if not getattr(content, "exception", None) else "failed",
386
- "exception": str(getattr(content, "exception", None)) if getattr(content, "exception", None) else None,
387
- "timestamp": datetime.now().isoformat(),
388
- },
389
- call_id=getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
390
- item_id=context["item_id"],
553
+ call_id=call_id,
554
+ output=output,
555
+ status=status,
556
+ item_id=item_id,
391
557
  output_index=context["output_index"],
392
558
  sequence_number=self._next_sequence(context),
393
559
  )
@@ -402,26 +568,34 @@ class MessageMapper:
402
568
  sequence_number=self._next_sequence(context),
403
569
  )
404
570
 
405
- async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> ResponseUsageEventComplete:
406
- """Map UsageContent to structured usage event."""
407
- # Store usage data in context for aggregation
408
- if "usage_data" not in context:
409
- context["usage_data"] = []
410
- context["usage_data"].append(content)
571
+ async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> None:
572
+ """Accumulate usage data for final Response.usage field.
411
573
 
412
- return ResponseUsageEventComplete(
413
- type="response.usage.complete",
414
- data={
415
- "usage_data": getattr(content, "usage_data", {}),
416
- "total_tokens": getattr(content, "total_tokens", 0),
417
- "completion_tokens": getattr(content, "completion_tokens", 0),
418
- "prompt_tokens": getattr(content, "prompt_tokens", 0),
419
- "timestamp": datetime.now().isoformat(),
420
- },
421
- item_id=context["item_id"],
422
- output_index=context["output_index"],
423
- sequence_number=self._next_sequence(context),
424
- )
574
+ OpenAI does NOT stream usage events. Usage appears only in final Response.
575
+ This method accumulates usage data per request for later inclusion in Response.usage.
576
+
577
+ Returns:
578
+ None - no event emitted (usage goes in final Response.usage)
579
+ """
580
+ # Extract usage from UsageContent.details (UsageDetails object)
581
+ details = getattr(content, "details", None)
582
+ total_tokens = getattr(details, "total_token_count", 0) or 0
583
+ prompt_tokens = getattr(details, "input_token_count", 0) or 0
584
+ completion_tokens = getattr(details, "output_token_count", 0) or 0
585
+
586
+ # Accumulate for final Response.usage
587
+ request_id = context.get("request_id", "default")
588
+ if request_id not in self._usage_accumulator:
589
+ self._usage_accumulator[request_id] = {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
590
+
591
+ self._usage_accumulator[request_id]["input_tokens"] += prompt_tokens
592
+ self._usage_accumulator[request_id]["output_tokens"] += completion_tokens
593
+ self._usage_accumulator[request_id]["total_tokens"] += total_tokens
594
+
595
+ logger.debug(f"Accumulated usage for {request_id}: {self._usage_accumulator[request_id]}")
596
+
597
+ # NO EVENT RETURNED - usage goes in final Response only
598
+ return
425
599
 
426
600
  async def _map_data_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
427
601
  """Map DataContent to structured trace event."""
@@ -486,15 +660,24 @@ class MessageMapper:
486
660
 
487
661
  async def _map_approval_request_content(self, content: Any, context: dict[str, Any]) -> dict[str, Any]:
488
662
  """Map FunctionApprovalRequestContent to custom event."""
663
+ # Parse arguments to ensure they're always a dict, not a JSON string
664
+ # This prevents double-escaping when the frontend calls JSON.stringify()
665
+ arguments: dict[str, Any] = {}
666
+ if hasattr(content, "function_call"):
667
+ if hasattr(content.function_call, "parse_arguments"):
668
+ # Use parse_arguments() to convert string arguments to dict
669
+ arguments = content.function_call.parse_arguments() or {}
670
+ else:
671
+ # Fallback to direct access if parse_arguments doesn't exist
672
+ arguments = getattr(content.function_call, "arguments", {})
673
+
489
674
  return {
490
675
  "type": "response.function_approval.requested",
491
676
  "request_id": getattr(content, "id", "unknown"),
492
677
  "function_call": {
493
678
  "id": getattr(content.function_call, "call_id", "") if hasattr(content, "function_call") else "",
494
679
  "name": getattr(content.function_call, "name", "") if hasattr(content, "function_call") else "",
495
- "arguments": getattr(content.function_call, "arguments", {})
496
- if hasattr(content, "function_call")
497
- else {},
680
+ "arguments": arguments,
498
681
  },
499
682
  "item_id": context["item_id"],
500
683
  "output_index": context["output_index"],
@@ -534,19 +717,15 @@ class MessageMapper:
534
717
 
535
718
  async def _create_unknown_event(self, event_data: Any, context: dict[str, Any]) -> ResponseStreamEvent:
536
719
  """Create event for unknown event types."""
537
- text = f"Unknown event: {event_data!s}\\n"
720
+ text = f"Unknown event: {event_data!s}\n"
538
721
  return self._create_text_delta_event(text, context)
539
722
 
540
723
  async def _create_unknown_content_event(self, content: Any, context: dict[str, Any]) -> ResponseStreamEvent:
541
724
  """Create event for unknown content types."""
542
725
  content_type = content.__class__.__name__
543
- text = f"⚠️ Unknown content type: {content_type}\\n"
726
+ text = f"⚠️ Unknown content type: {content_type}\n"
544
727
  return self._create_text_delta_event(text, context)
545
728
 
546
- def _chunk_json_string(self, json_str: str, chunk_size: int = 50) -> list[str]:
547
- """Chunk JSON string for streaming."""
548
- return [json_str[i : i + chunk_size] for i in range(0, len(json_str), chunk_size)]
549
-
550
729
  async def _create_error_response(self, error_message: str, request: AgentFrameworkRequest) -> OpenAIResponse:
551
730
  """Create error response."""
552
731
  error_text = f"Error: {error_message}"