agent-framework-devui 0.0.1a0__py3-none-any.whl → 1.0.0b251007__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agent-framework-devui might be problematic. See the details below.

@@ -0,0 +1,558 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+ """Agent Framework message mapper implementation."""
4
+
5
+ import json
6
+ import logging
7
+ import uuid
8
+ from collections.abc import Sequence
9
+ from datetime import datetime
10
+ from typing import Any, Union
11
+
12
+ from .models import (
13
+ AgentFrameworkRequest,
14
+ InputTokensDetails,
15
+ OpenAIResponse,
16
+ OutputTokensDetails,
17
+ ResponseErrorEvent,
18
+ ResponseFunctionCallArgumentsDeltaEvent,
19
+ ResponseFunctionResultComplete,
20
+ ResponseOutputMessage,
21
+ ResponseOutputText,
22
+ ResponseReasoningTextDeltaEvent,
23
+ ResponseStreamEvent,
24
+ ResponseTextDeltaEvent,
25
+ ResponseTraceEventComplete,
26
+ ResponseUsage,
27
+ ResponseUsageEventComplete,
28
+ ResponseWorkflowEventComplete,
29
+ )
30
+
31
+ logger = logging.getLogger(__name__)
32
+
33
# Type alias for all event types a mapper method can emit: standard OpenAI
# stream events plus the DevUI-specific "*.complete" events (workflow,
# function result, trace, usage) declared in .models.
EventType = Union[
    ResponseStreamEvent,
    ResponseWorkflowEventComplete,
    ResponseFunctionResultComplete,
    ResponseTraceEventComplete,
    ResponseUsageEventComplete,
]
41
+
42
+
43
class MessageMapper:
    """Maps Agent Framework messages/responses to OpenAI format."""

    def __init__(self) -> None:
        """Initialize Agent Framework message mapper."""
        # Global counter; per-request counters actually live inside the
        # per-request context dicts (see _get_or_create_context).
        self.sequence_counter = 0
        # Per-request conversion state, keyed by id(request).
        self._conversion_contexts: dict[int, dict[str, Any]] = {}

        # Register content type mappers for all 12 Agent Framework content types.
        # Keys are Agent Framework content class names; values are async mapper
        # methods returning either a single event or a list of events.
        self.content_mappers = {
            "TextContent": self._map_text_content,
            "TextReasoningContent": self._map_reasoning_content,
            "FunctionCallContent": self._map_function_call_content,
            "FunctionResultContent": self._map_function_result_content,
            "ErrorContent": self._map_error_content,
            "UsageContent": self._map_usage_content,
            "DataContent": self._map_data_content,
            "UriContent": self._map_uri_content,
            "HostedFileContent": self._map_hosted_file_content,
            "HostedVectorStoreContent": self._map_hosted_vector_store_content,
            "FunctionApprovalRequestContent": self._map_approval_request_content,
            "FunctionApprovalResponseContent": self._map_approval_response_content,
        }
66
+
67
    async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> Sequence[Any]:
        """Convert a single Agent Framework event to OpenAI events.

        Args:
            raw_event: Agent Framework event (AgentRunResponseUpdate, WorkflowEvent, etc.)
            request: Original request for context

        Returns:
            List of OpenAI response stream events
        """
        context = self._get_or_create_context(request)

        # Handle error events (plain dicts with type == "error")
        if isinstance(raw_event, dict) and raw_event.get("type") == "error":
            return [await self._create_error_event(raw_event.get("message", "Unknown error"), context)]

        # Handle ResponseTraceEvent objects from our trace collector
        from .models import ResponseTraceEvent

        if isinstance(raw_event, ResponseTraceEvent):
            return [
                ResponseTraceEventComplete(
                    type="response.trace.complete",
                    data=raw_event.data,
                    item_id=context["item_id"],
                    sequence_number=self._next_sequence(context),
                )
            ]

        # Import Agent Framework types for proper isinstance checks.
        # Function-scope import: agent_framework may be unavailable, in which
        # case we fall back to duck typing below.
        try:
            from agent_framework import AgentRunResponseUpdate, WorkflowEvent
            from agent_framework._workflows._events import AgentRunUpdateEvent

            # Handle AgentRunUpdateEvent - workflow event wrapping AgentRunResponseUpdate.
            # This must be checked BEFORE generic WorkflowEvent check, because
            # AgentRunUpdateEvent is itself a WorkflowEvent subclass.
            if isinstance(raw_event, AgentRunUpdateEvent):
                # Extract the AgentRunResponseUpdate from the event's data attribute
                if raw_event.data and isinstance(raw_event.data, AgentRunResponseUpdate):
                    return await self._convert_agent_update(raw_event.data, context)
                # If no data, treat as generic workflow event
                return await self._convert_workflow_event(raw_event, context)

            # Handle agent updates (AgentRunResponseUpdate) - for direct agent execution
            if isinstance(raw_event, AgentRunResponseUpdate):
                return await self._convert_agent_update(raw_event, context)

            # Handle workflow events (any class that inherits from WorkflowEvent)
            if isinstance(raw_event, WorkflowEvent):
                return await self._convert_workflow_event(raw_event, context)

        except ImportError as e:
            logger.warning(f"Could not import Agent Framework types: {e}")
            # Fallback to attribute-based detection:
            # anything with "contents" is treated as an agent update; anything
            # whose class name contains "Event" as a workflow event.
            if hasattr(raw_event, "contents"):
                return await self._convert_agent_update(raw_event, context)
            if hasattr(raw_event, "__class__") and "Event" in raw_event.__class__.__name__:
                return await self._convert_workflow_event(raw_event, context)

        # Unknown event type - surfaced to the client as a text delta
        return [await self._create_unknown_event(raw_event, context)]
128
+
129
+ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrameworkRequest) -> OpenAIResponse:
130
+ """Aggregate streaming events into final OpenAI response.
131
+
132
+ Args:
133
+ events: List of OpenAI stream events
134
+ request: Original request for context
135
+
136
+ Returns:
137
+ Final aggregated OpenAI response
138
+ """
139
+ try:
140
+ # Extract text content from events
141
+ content_parts = []
142
+
143
+ for event in events:
144
+ # Extract delta text from ResponseTextDeltaEvent
145
+ if hasattr(event, "delta") and hasattr(event, "type") and event.type == "response.output_text.delta":
146
+ content_parts.append(event.delta)
147
+
148
+ # Combine content
149
+ full_content = "".join(content_parts)
150
+
151
+ # Create proper OpenAI Response
152
+ response_output_text = ResponseOutputText(type="output_text", text=full_content, annotations=[])
153
+
154
+ response_output_message = ResponseOutputMessage(
155
+ type="message",
156
+ role="assistant",
157
+ content=[response_output_text],
158
+ id=f"msg_{uuid.uuid4().hex[:8]}",
159
+ status="completed",
160
+ )
161
+
162
+ # Create usage object
163
+ input_token_count = len(str(request.input)) // 4 if request.input else 0
164
+ output_token_count = len(full_content) // 4
165
+
166
+ usage = ResponseUsage(
167
+ input_tokens=input_token_count,
168
+ output_tokens=output_token_count,
169
+ total_tokens=input_token_count + output_token_count,
170
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
171
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
172
+ )
173
+
174
+ return OpenAIResponse(
175
+ id=f"resp_{uuid.uuid4().hex[:12]}",
176
+ object="response",
177
+ created_at=datetime.now().timestamp(),
178
+ model=request.model,
179
+ output=[response_output_message],
180
+ usage=usage,
181
+ parallel_tool_calls=False,
182
+ tool_choice="none",
183
+ tools=[],
184
+ )
185
+
186
+ except Exception as e:
187
+ logger.exception(f"Error aggregating response: {e}")
188
+ return await self._create_error_response(str(e), request)
189
+
190
    def _get_or_create_context(self, request: AgentFrameworkRequest) -> dict[str, Any]:
        """Get or create conversion context for this request.

        Args:
            request: Request to get context for

        Returns:
            Conversion context dictionary with keys: "sequence_counter",
            "item_id", "content_index", "output_index"

        NOTE(review): contexts are keyed by id(request) and never removed, so
        this dict grows for the mapper's lifetime; id() values can also be
        reused after a request object is garbage-collected. Confirm the
        intended lifecycle with callers.
        """
        request_key = id(request)
        if request_key not in self._conversion_contexts:
            self._conversion_contexts[request_key] = {
                "sequence_counter": 0,
                "item_id": f"msg_{uuid.uuid4().hex[:8]}",
                "content_index": 0,
                "output_index": 0,
            }
        return self._conversion_contexts[request_key]
208
+
209
+ def _next_sequence(self, context: dict[str, Any]) -> int:
210
+ """Get next sequence number for events.
211
+
212
+ Args:
213
+ context: Conversion context
214
+
215
+ Returns:
216
+ Next sequence number
217
+ """
218
+ context["sequence_counter"] += 1
219
+ return int(context["sequence_counter"])
220
+
221
+ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> Sequence[Any]:
222
+ """Convert AgentRunResponseUpdate to OpenAI events using comprehensive content mapping.
223
+
224
+ Args:
225
+ update: Agent run response update
226
+ context: Conversion context
227
+
228
+ Returns:
229
+ List of OpenAI response stream events
230
+ """
231
+ events: list[Any] = []
232
+
233
+ try:
234
+ # Handle different update types
235
+ if not hasattr(update, "contents") or not update.contents:
236
+ return events
237
+
238
+ for content in update.contents:
239
+ content_type = content.__class__.__name__
240
+
241
+ if content_type in self.content_mappers:
242
+ mapped_events = await self.content_mappers[content_type](content, context)
243
+ if isinstance(mapped_events, list):
244
+ events.extend(mapped_events)
245
+ else:
246
+ events.append(mapped_events)
247
+ else:
248
+ # Graceful fallback for unknown content types
249
+ events.append(await self._create_unknown_content_event(content, context))
250
+
251
+ context["content_index"] += 1
252
+
253
+ except Exception as e:
254
+ logger.warning(f"Error converting agent update: {e}")
255
+ events.append(await self._create_error_event(str(e), context))
256
+
257
+ return events
258
+
259
+ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]:
260
+ """Convert workflow event to structured OpenAI events.
261
+
262
+ Args:
263
+ event: Workflow event
264
+ context: Conversion context
265
+
266
+ Returns:
267
+ List of OpenAI response stream events
268
+ """
269
+ try:
270
+ # Get event data and serialize if it's a SerializationMixin
271
+ event_data = getattr(event, "data", None)
272
+ if event_data is not None and hasattr(event_data, "to_dict"):
273
+ # SerializationMixin objects - convert to dict for JSON serialization
274
+ try:
275
+ event_data = event_data.to_dict()
276
+ except Exception as e:
277
+ logger.debug(f"Failed to serialize event data with to_dict(): {e}")
278
+ event_data = str(event_data)
279
+
280
+ # Create structured workflow event
281
+ workflow_event = ResponseWorkflowEventComplete(
282
+ type="response.workflow_event.complete",
283
+ data={
284
+ "event_type": event.__class__.__name__,
285
+ "data": event_data,
286
+ "executor_id": getattr(event, "executor_id", None),
287
+ "timestamp": datetime.now().isoformat(),
288
+ },
289
+ executor_id=getattr(event, "executor_id", None),
290
+ item_id=context["item_id"],
291
+ output_index=context["output_index"],
292
+ sequence_number=self._next_sequence(context),
293
+ )
294
+
295
+ return [workflow_event]
296
+
297
+ except Exception as e:
298
+ logger.warning(f"Error converting workflow event: {e}")
299
+ return [await self._create_error_event(str(e), context)]
300
+
301
+ # Content type mappers - implementing our comprehensive mapping plan
302
+
303
+ async def _map_text_content(self, content: Any, context: dict[str, Any]) -> ResponseTextDeltaEvent:
304
+ """Map TextContent to ResponseTextDeltaEvent."""
305
+ return self._create_text_delta_event(content.text, context)
306
+
307
+ async def _map_reasoning_content(self, content: Any, context: dict[str, Any]) -> ResponseReasoningTextDeltaEvent:
308
+ """Map TextReasoningContent to ResponseReasoningTextDeltaEvent."""
309
+ return ResponseReasoningTextDeltaEvent(
310
+ type="response.reasoning_text.delta",
311
+ delta=content.text,
312
+ item_id=context["item_id"],
313
+ output_index=context["output_index"],
314
+ content_index=context["content_index"],
315
+ sequence_number=self._next_sequence(context),
316
+ )
317
+
318
+ async def _map_function_call_content(
319
+ self, content: Any, context: dict[str, Any]
320
+ ) -> list[ResponseFunctionCallArgumentsDeltaEvent]:
321
+ """Map FunctionCallContent to ResponseFunctionCallArgumentsDeltaEvent(s)."""
322
+ events = []
323
+
324
+ # For streaming, need to chunk the arguments JSON
325
+ args_str = json.dumps(content.arguments) if hasattr(content, "arguments") and content.arguments else "{}"
326
+
327
+ # Chunk the JSON string for streaming
328
+ for chunk in self._chunk_json_string(args_str):
329
+ events.append(
330
+ ResponseFunctionCallArgumentsDeltaEvent(
331
+ type="response.function_call_arguments.delta",
332
+ delta=chunk,
333
+ item_id=context["item_id"],
334
+ output_index=context["output_index"],
335
+ sequence_number=self._next_sequence(context),
336
+ )
337
+ )
338
+
339
+ return events
340
+
341
+ async def _map_function_result_content(
342
+ self, content: Any, context: dict[str, Any]
343
+ ) -> ResponseFunctionResultComplete:
344
+ """Map FunctionResultContent to structured event."""
345
+ return ResponseFunctionResultComplete(
346
+ type="response.function_result.complete",
347
+ data={
348
+ "call_id": getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
349
+ "result": getattr(content, "result", None),
350
+ "status": "completed" if not getattr(content, "exception", None) else "failed",
351
+ "exception": str(getattr(content, "exception", None)) if getattr(content, "exception", None) else None,
352
+ "timestamp": datetime.now().isoformat(),
353
+ },
354
+ call_id=getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
355
+ item_id=context["item_id"],
356
+ output_index=context["output_index"],
357
+ sequence_number=self._next_sequence(context),
358
+ )
359
+
360
+ async def _map_error_content(self, content: Any, context: dict[str, Any]) -> ResponseErrorEvent:
361
+ """Map ErrorContent to ResponseErrorEvent."""
362
+ return ResponseErrorEvent(
363
+ type="error",
364
+ message=getattr(content, "message", "Unknown error"),
365
+ code=getattr(content, "error_code", None),
366
+ param=None,
367
+ sequence_number=self._next_sequence(context),
368
+ )
369
+
370
+ async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> ResponseUsageEventComplete:
371
+ """Map UsageContent to structured usage event."""
372
+ # Store usage data in context for aggregation
373
+ if "usage_data" not in context:
374
+ context["usage_data"] = []
375
+ context["usage_data"].append(content)
376
+
377
+ # Extract usage from UsageContent.details (UsageDetails object)
378
+ details = getattr(content, "details", None)
379
+ total_tokens = 0
380
+ prompt_tokens = 0
381
+ completion_tokens = 0
382
+
383
+ if details:
384
+ total_tokens = getattr(details, "total_token_count", 0) or 0
385
+ prompt_tokens = getattr(details, "input_token_count", 0) or 0
386
+ completion_tokens = getattr(details, "output_token_count", 0) or 0
387
+
388
+ return ResponseUsageEventComplete(
389
+ type="response.usage.complete",
390
+ data={
391
+ "usage_data": details.to_dict() if details and hasattr(details, "to_dict") else {},
392
+ "total_tokens": total_tokens,
393
+ "completion_tokens": completion_tokens,
394
+ "prompt_tokens": prompt_tokens,
395
+ "timestamp": datetime.now().isoformat(),
396
+ },
397
+ item_id=context["item_id"],
398
+ output_index=context["output_index"],
399
+ sequence_number=self._next_sequence(context),
400
+ )
401
+
402
+ async def _map_data_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
403
+ """Map DataContent to structured trace event."""
404
+ return ResponseTraceEventComplete(
405
+ type="response.trace.complete",
406
+ data={
407
+ "content_type": "data",
408
+ "data": getattr(content, "data", None),
409
+ "mime_type": getattr(content, "mime_type", "application/octet-stream"),
410
+ "size_bytes": len(str(getattr(content, "data", ""))) if getattr(content, "data", None) else 0,
411
+ "timestamp": datetime.now().isoformat(),
412
+ },
413
+ item_id=context["item_id"],
414
+ output_index=context["output_index"],
415
+ sequence_number=self._next_sequence(context),
416
+ )
417
+
418
+ async def _map_uri_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
419
+ """Map UriContent to structured trace event."""
420
+ return ResponseTraceEventComplete(
421
+ type="response.trace.complete",
422
+ data={
423
+ "content_type": "uri",
424
+ "uri": getattr(content, "uri", ""),
425
+ "mime_type": getattr(content, "mime_type", "text/plain"),
426
+ "timestamp": datetime.now().isoformat(),
427
+ },
428
+ item_id=context["item_id"],
429
+ output_index=context["output_index"],
430
+ sequence_number=self._next_sequence(context),
431
+ )
432
+
433
+ async def _map_hosted_file_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
434
+ """Map HostedFileContent to structured trace event."""
435
+ return ResponseTraceEventComplete(
436
+ type="response.trace.complete",
437
+ data={
438
+ "content_type": "hosted_file",
439
+ "file_id": getattr(content, "file_id", "unknown"),
440
+ "timestamp": datetime.now().isoformat(),
441
+ },
442
+ item_id=context["item_id"],
443
+ output_index=context["output_index"],
444
+ sequence_number=self._next_sequence(context),
445
+ )
446
+
447
+ async def _map_hosted_vector_store_content(
448
+ self, content: Any, context: dict[str, Any]
449
+ ) -> ResponseTraceEventComplete:
450
+ """Map HostedVectorStoreContent to structured trace event."""
451
+ return ResponseTraceEventComplete(
452
+ type="response.trace.complete",
453
+ data={
454
+ "content_type": "hosted_vector_store",
455
+ "vector_store_id": getattr(content, "vector_store_id", "unknown"),
456
+ "timestamp": datetime.now().isoformat(),
457
+ },
458
+ item_id=context["item_id"],
459
+ output_index=context["output_index"],
460
+ sequence_number=self._next_sequence(context),
461
+ )
462
+
463
+ async def _map_approval_request_content(self, content: Any, context: dict[str, Any]) -> dict[str, Any]:
464
+ """Map FunctionApprovalRequestContent to custom event."""
465
+ return {
466
+ "type": "response.function_approval.requested",
467
+ "request_id": getattr(content, "id", "unknown"),
468
+ "function_call": {
469
+ "id": getattr(content.function_call, "call_id", "") if hasattr(content, "function_call") else "",
470
+ "name": getattr(content.function_call, "name", "") if hasattr(content, "function_call") else "",
471
+ "arguments": getattr(content.function_call, "arguments", {})
472
+ if hasattr(content, "function_call")
473
+ else {},
474
+ },
475
+ "item_id": context["item_id"],
476
+ "output_index": context["output_index"],
477
+ "sequence_number": self._next_sequence(context),
478
+ }
479
+
480
+ async def _map_approval_response_content(self, content: Any, context: dict[str, Any]) -> dict[str, Any]:
481
+ """Map FunctionApprovalResponseContent to custom event."""
482
+ return {
483
+ "type": "response.function_approval.responded",
484
+ "request_id": getattr(content, "request_id", "unknown"),
485
+ "approved": getattr(content, "approved", False),
486
+ "item_id": context["item_id"],
487
+ "output_index": context["output_index"],
488
+ "sequence_number": self._next_sequence(context),
489
+ }
490
+
491
+ # Helper methods
492
+
493
+ def _create_text_delta_event(self, text: str, context: dict[str, Any]) -> ResponseTextDeltaEvent:
494
+ """Create a ResponseTextDeltaEvent."""
495
+ return ResponseTextDeltaEvent(
496
+ type="response.output_text.delta",
497
+ item_id=context["item_id"],
498
+ output_index=context["output_index"],
499
+ content_index=context["content_index"],
500
+ delta=text,
501
+ sequence_number=self._next_sequence(context),
502
+ logprobs=[],
503
+ )
504
+
505
+ async def _create_error_event(self, message: str, context: dict[str, Any]) -> ResponseErrorEvent:
506
+ """Create a ResponseErrorEvent."""
507
+ return ResponseErrorEvent(
508
+ type="error", message=message, code=None, param=None, sequence_number=self._next_sequence(context)
509
+ )
510
+
511
    async def _create_unknown_event(self, event_data: Any, context: dict[str, Any]) -> ResponseStreamEvent:
        """Create event for unknown event types.

        Falls back to a plain text delta describing the unrecognized event so
        the client still sees something.
        """
        text = f"Unknown event: {event_data!s}\\n"
        return self._create_text_delta_event(text, context)
515
+
516
    async def _create_unknown_content_event(self, content: Any, context: dict[str, Any]) -> ResponseStreamEvent:
        """Create event for unknown content types.

        Emits a warning-style text delta naming the unhandled content class.
        """
        content_type = content.__class__.__name__
        text = f"⚠️ Unknown content type: {content_type}\\n"
        return self._create_text_delta_event(text, context)
521
+
522
+ def _chunk_json_string(self, json_str: str, chunk_size: int = 50) -> list[str]:
523
+ """Chunk JSON string for streaming."""
524
+ return [json_str[i : i + chunk_size] for i in range(0, len(json_str), chunk_size)]
525
+
526
+ async def _create_error_response(self, error_message: str, request: AgentFrameworkRequest) -> OpenAIResponse:
527
+ """Create error response."""
528
+ error_text = f"Error: {error_message}"
529
+
530
+ response_output_text = ResponseOutputText(type="output_text", text=error_text, annotations=[])
531
+
532
+ response_output_message = ResponseOutputMessage(
533
+ type="message",
534
+ role="assistant",
535
+ content=[response_output_text],
536
+ id=f"msg_{uuid.uuid4().hex[:8]}",
537
+ status="completed",
538
+ )
539
+
540
+ usage = ResponseUsage(
541
+ input_tokens=0,
542
+ output_tokens=0,
543
+ total_tokens=0,
544
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
545
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
546
+ )
547
+
548
+ return OpenAIResponse(
549
+ id=f"resp_{uuid.uuid4().hex[:12]}",
550
+ object="response",
551
+ created_at=datetime.now().timestamp(),
552
+ model=request.model,
553
+ output=[response_output_message],
554
+ usage=usage,
555
+ parallel_tool_calls=False,
556
+ tool_choice="none",
557
+ tools=[],
558
+ )