agent-framework-devui 0.0.1a0__py3-none-any.whl → 1.0.0b251001__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of agent-framework-devui might be problematic; see the advisory linked from the registry listing for more details.

@@ -0,0 +1,582 @@
1
+ # Copyright (c) Microsoft. All rights reserved.
2
+
3
+ """Agent Framework message mapper implementation."""
4
+
5
+ import json
6
+ import logging
7
+ import uuid
8
+ from collections.abc import Sequence
9
+ from dataclasses import asdict, is_dataclass
10
+ from datetime import datetime
11
+ from typing import Any, Union
12
+
13
+ from .models import (
14
+ AgentFrameworkRequest,
15
+ InputTokensDetails,
16
+ OpenAIResponse,
17
+ OutputTokensDetails,
18
+ ResponseErrorEvent,
19
+ ResponseFunctionCallArgumentsDeltaEvent,
20
+ ResponseFunctionResultComplete,
21
+ ResponseOutputMessage,
22
+ ResponseOutputText,
23
+ ResponseReasoningTextDeltaEvent,
24
+ ResponseStreamEvent,
25
+ ResponseTextDeltaEvent,
26
+ ResponseTraceEventComplete,
27
+ ResponseUsage,
28
+ ResponseUsageEventComplete,
29
+ ResponseWorkflowEventComplete,
30
+ )
31
+
32
+ logger = logging.getLogger(__name__)
33
+
34
+ # Type alias for all possible event types
35
+ EventType = Union[
36
+ ResponseStreamEvent,
37
+ ResponseWorkflowEventComplete,
38
+ ResponseFunctionResultComplete,
39
+ ResponseTraceEventComplete,
40
+ ResponseUsageEventComplete,
41
+ ]
42
+
43
+
44
class MessageMapper:
    """Maps Agent Framework messages/responses to the OpenAI Responses format.

    Streaming framework events are converted one at a time via ``convert_event``
    and the resulting stream events can be folded into a final response with
    ``aggregate_to_response``.  Per-request state (sequence numbers, item ids,
    content/output indices) is tracked in an internal context dictionary.
    """
46
+
47
+ def __init__(self) -> None:
48
+ """Initialize Agent Framework message mapper."""
49
+ self.sequence_counter = 0
50
+ self._conversion_contexts: dict[int, dict[str, Any]] = {}
51
+
52
+ # Register content type mappers for all 12 Agent Framework content types
53
+ self.content_mappers = {
54
+ "TextContent": self._map_text_content,
55
+ "TextReasoningContent": self._map_reasoning_content,
56
+ "FunctionCallContent": self._map_function_call_content,
57
+ "FunctionResultContent": self._map_function_result_content,
58
+ "ErrorContent": self._map_error_content,
59
+ "UsageContent": self._map_usage_content,
60
+ "DataContent": self._map_data_content,
61
+ "UriContent": self._map_uri_content,
62
+ "HostedFileContent": self._map_hosted_file_content,
63
+ "HostedVectorStoreContent": self._map_hosted_vector_store_content,
64
+ "FunctionApprovalRequestContent": self._map_approval_request_content,
65
+ "FunctionApprovalResponseContent": self._map_approval_response_content,
66
+ }
67
+
68
+ async def convert_event(self, raw_event: Any, request: AgentFrameworkRequest) -> Sequence[Any]:
69
+ """Convert a single Agent Framework event to OpenAI events.
70
+
71
+ Args:
72
+ raw_event: Agent Framework event (AgentRunResponseUpdate, WorkflowEvent, etc.)
73
+ request: Original request for context
74
+
75
+ Returns:
76
+ List of OpenAI response stream events
77
+ """
78
+ context = self._get_or_create_context(request)
79
+
80
+ # Handle error events
81
+ if isinstance(raw_event, dict) and raw_event.get("type") == "error":
82
+ return [await self._create_error_event(raw_event.get("message", "Unknown error"), context)]
83
+
84
+ # Handle ResponseTraceEvent objects from our trace collector
85
+ from .models import ResponseTraceEvent
86
+
87
+ if isinstance(raw_event, ResponseTraceEvent):
88
+ return [
89
+ ResponseTraceEventComplete(
90
+ type="response.trace.complete",
91
+ data=raw_event.data,
92
+ item_id=context["item_id"],
93
+ sequence_number=self._next_sequence(context),
94
+ )
95
+ ]
96
+
97
+ # Import Agent Framework types for proper isinstance checks
98
+ try:
99
+ from agent_framework import AgentRunResponseUpdate, WorkflowEvent
100
+
101
+ # Handle agent updates (AgentRunResponseUpdate)
102
+ if isinstance(raw_event, AgentRunResponseUpdate):
103
+ return await self._convert_agent_update(raw_event, context)
104
+
105
+ # Handle workflow events (any class that inherits from WorkflowEvent)
106
+ if isinstance(raw_event, WorkflowEvent):
107
+ return await self._convert_workflow_event(raw_event, context)
108
+
109
+ except ImportError as e:
110
+ logger.warning(f"Could not import Agent Framework types: {e}")
111
+ # Fallback to attribute-based detection
112
+ if hasattr(raw_event, "contents"):
113
+ return await self._convert_agent_update(raw_event, context)
114
+ if hasattr(raw_event, "__class__") and "Event" in raw_event.__class__.__name__:
115
+ return await self._convert_workflow_event(raw_event, context)
116
+
117
+ # Unknown event type
118
+ return [await self._create_unknown_event(raw_event, context)]
119
+
120
+ async def aggregate_to_response(self, events: Sequence[Any], request: AgentFrameworkRequest) -> OpenAIResponse:
121
+ """Aggregate streaming events into final OpenAI response.
122
+
123
+ Args:
124
+ events: List of OpenAI stream events
125
+ request: Original request for context
126
+
127
+ Returns:
128
+ Final aggregated OpenAI response
129
+ """
130
+ try:
131
+ # Extract text content from events
132
+ content_parts = []
133
+
134
+ for event in events:
135
+ # Extract delta text from ResponseTextDeltaEvent
136
+ if hasattr(event, "delta") and hasattr(event, "type") and event.type == "response.output_text.delta":
137
+ content_parts.append(event.delta)
138
+
139
+ # Combine content
140
+ full_content = "".join(content_parts)
141
+
142
+ # Create proper OpenAI Response
143
+ response_output_text = ResponseOutputText(type="output_text", text=full_content, annotations=[])
144
+
145
+ response_output_message = ResponseOutputMessage(
146
+ type="message",
147
+ role="assistant",
148
+ content=[response_output_text],
149
+ id=f"msg_{uuid.uuid4().hex[:8]}",
150
+ status="completed",
151
+ )
152
+
153
+ # Create usage object
154
+ input_token_count = len(str(request.input)) // 4 if request.input else 0
155
+ output_token_count = len(full_content) // 4
156
+
157
+ usage = ResponseUsage(
158
+ input_tokens=input_token_count,
159
+ output_tokens=output_token_count,
160
+ total_tokens=input_token_count + output_token_count,
161
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
162
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
163
+ )
164
+
165
+ return OpenAIResponse(
166
+ id=f"resp_{uuid.uuid4().hex[:12]}",
167
+ object="response",
168
+ created_at=datetime.now().timestamp(),
169
+ model=request.model,
170
+ output=[response_output_message],
171
+ usage=usage,
172
+ parallel_tool_calls=False,
173
+ tool_choice="none",
174
+ tools=[],
175
+ )
176
+
177
+ except Exception as e:
178
+ logger.exception(f"Error aggregating response: {e}")
179
+ return await self._create_error_response(str(e), request)
180
+
181
+ def _get_or_create_context(self, request: AgentFrameworkRequest) -> dict[str, Any]:
182
+ """Get or create conversion context for this request.
183
+
184
+ Args:
185
+ request: Request to get context for
186
+
187
+ Returns:
188
+ Conversion context dictionary
189
+ """
190
+ request_key = id(request)
191
+ if request_key not in self._conversion_contexts:
192
+ self._conversion_contexts[request_key] = {
193
+ "sequence_counter": 0,
194
+ "item_id": f"msg_{uuid.uuid4().hex[:8]}",
195
+ "content_index": 0,
196
+ "output_index": 0,
197
+ }
198
+ return self._conversion_contexts[request_key]
199
+
200
+ def _next_sequence(self, context: dict[str, Any]) -> int:
201
+ """Get next sequence number for events.
202
+
203
+ Args:
204
+ context: Conversion context
205
+
206
+ Returns:
207
+ Next sequence number
208
+ """
209
+ context["sequence_counter"] += 1
210
+ return int(context["sequence_counter"])
211
+
212
+ async def _convert_agent_update(self, update: Any, context: dict[str, Any]) -> Sequence[Any]:
213
+ """Convert AgentRunResponseUpdate to OpenAI events using comprehensive content mapping.
214
+
215
+ Args:
216
+ update: Agent run response update
217
+ context: Conversion context
218
+
219
+ Returns:
220
+ List of OpenAI response stream events
221
+ """
222
+ events: list[Any] = []
223
+
224
+ try:
225
+ # Handle different update types
226
+ if not hasattr(update, "contents") or not update.contents:
227
+ return events
228
+
229
+ for content in update.contents:
230
+ content_type = content.__class__.__name__
231
+
232
+ if content_type in self.content_mappers:
233
+ mapped_events = await self.content_mappers[content_type](content, context)
234
+ if isinstance(mapped_events, list):
235
+ events.extend(mapped_events)
236
+ else:
237
+ events.append(mapped_events)
238
+ else:
239
+ # Graceful fallback for unknown content types
240
+ events.append(await self._create_unknown_content_event(content, context))
241
+
242
+ context["content_index"] += 1
243
+
244
+ except Exception as e:
245
+ logger.warning(f"Error converting agent update: {e}")
246
+ events.append(await self._create_error_event(str(e), context))
247
+
248
+ return events
249
+
250
+ async def _convert_workflow_event(self, event: Any, context: dict[str, Any]) -> Sequence[Any]:
251
+ """Convert workflow event to structured OpenAI events.
252
+
253
+ Args:
254
+ event: Workflow event
255
+ context: Conversion context
256
+
257
+ Returns:
258
+ List of OpenAI response stream events
259
+ """
260
+ try:
261
+ serialized_payload = self._serialize_payload(getattr(event, "data", None))
262
+ # Create structured workflow event
263
+ workflow_event = ResponseWorkflowEventComplete(
264
+ type="response.workflow_event.complete",
265
+ data={
266
+ "event_type": event.__class__.__name__,
267
+ "data": serialized_payload,
268
+ "executor_id": getattr(event, "executor_id", None),
269
+ "timestamp": datetime.now().isoformat(),
270
+ },
271
+ executor_id=getattr(event, "executor_id", None),
272
+ item_id=context["item_id"],
273
+ output_index=context["output_index"],
274
+ sequence_number=self._next_sequence(context),
275
+ )
276
+
277
+ return [workflow_event]
278
+
279
+ except Exception as e:
280
+ logger.warning(f"Error converting workflow event: {e}")
281
+ return [await self._create_error_event(str(e), context)]
282
+
283
+ def _serialize_payload(self, value: Any) -> Any:
284
+ """Best-effort JSON serialization for workflow payloads."""
285
+ if value is None:
286
+ return None
287
+ if isinstance(value, (str, int, float, bool)):
288
+ return value
289
+ if isinstance(value, (list, tuple, set)):
290
+ return [self._serialize_payload(item) for item in value]
291
+ if isinstance(value, dict):
292
+ return {str(k): self._serialize_payload(v) for k, v in value.items()}
293
+ if is_dataclass(value) and not isinstance(value, type):
294
+ try:
295
+ return self._serialize_payload(asdict(value))
296
+ except Exception as exc:
297
+ logger.debug("Failed to serialize dataclass payload: %s", exc)
298
+ model_dump_method = getattr(value, "model_dump", None)
299
+ if model_dump_method is not None and callable(model_dump_method):
300
+ try:
301
+ dumped = model_dump_method()
302
+ return self._serialize_payload(dumped)
303
+ except Exception as exc:
304
+ logger.debug("Failed to serialize payload via model_dump: %s", exc)
305
+ dict_method = getattr(value, "dict", None)
306
+ if dict_method is not None and callable(dict_method):
307
+ try:
308
+ dict_result = dict_method()
309
+ return self._serialize_payload(dict_result)
310
+ except Exception as exc:
311
+ logger.debug("Failed to serialize payload via dict(): %s", exc)
312
+ to_dict_method = getattr(value, "to_dict", None)
313
+ if to_dict_method is not None and callable(to_dict_method):
314
+ try:
315
+ to_dict_result = to_dict_method()
316
+ return self._serialize_payload(to_dict_result)
317
+ except Exception as exc:
318
+ logger.debug("Failed to serialize payload via to_dict(): %s", exc)
319
+ model_dump_json_method = getattr(value, "model_dump_json", None)
320
+ if model_dump_json_method is not None and callable(model_dump_json_method):
321
+ try:
322
+ json_str = model_dump_json_method()
323
+ if isinstance(json_str, (str, bytes, bytearray)):
324
+ return json.loads(json_str)
325
+ except Exception as exc:
326
+ logger.debug("Failed to serialize payload via model_dump_json: %s", exc)
327
+ if hasattr(value, "__dict__"):
328
+ try:
329
+ return self._serialize_payload({
330
+ key: self._serialize_payload(val) for key, val in value.__dict__.items() if not key.startswith("_")
331
+ })
332
+ except Exception as exc:
333
+ logger.debug("Failed to serialize payload via __dict__: %s", exc)
334
+ return str(value)
335
+
336
+ # Content type mappers - implementing our comprehensive mapping plan
337
+
338
+ async def _map_text_content(self, content: Any, context: dict[str, Any]) -> ResponseTextDeltaEvent:
339
+ """Map TextContent to ResponseTextDeltaEvent."""
340
+ return self._create_text_delta_event(content.text, context)
341
+
342
+ async def _map_reasoning_content(self, content: Any, context: dict[str, Any]) -> ResponseReasoningTextDeltaEvent:
343
+ """Map TextReasoningContent to ResponseReasoningTextDeltaEvent."""
344
+ return ResponseReasoningTextDeltaEvent(
345
+ type="response.reasoning_text.delta",
346
+ delta=content.text,
347
+ item_id=context["item_id"],
348
+ output_index=context["output_index"],
349
+ content_index=context["content_index"],
350
+ sequence_number=self._next_sequence(context),
351
+ )
352
+
353
+ async def _map_function_call_content(
354
+ self, content: Any, context: dict[str, Any]
355
+ ) -> list[ResponseFunctionCallArgumentsDeltaEvent]:
356
+ """Map FunctionCallContent to ResponseFunctionCallArgumentsDeltaEvent(s)."""
357
+ events = []
358
+
359
+ # For streaming, need to chunk the arguments JSON
360
+ args_str = json.dumps(content.arguments) if hasattr(content, "arguments") and content.arguments else "{}"
361
+
362
+ # Chunk the JSON string for streaming
363
+ for chunk in self._chunk_json_string(args_str):
364
+ events.append(
365
+ ResponseFunctionCallArgumentsDeltaEvent(
366
+ type="response.function_call_arguments.delta",
367
+ delta=chunk,
368
+ item_id=context["item_id"],
369
+ output_index=context["output_index"],
370
+ sequence_number=self._next_sequence(context),
371
+ )
372
+ )
373
+
374
+ return events
375
+
376
+ async def _map_function_result_content(
377
+ self, content: Any, context: dict[str, Any]
378
+ ) -> ResponseFunctionResultComplete:
379
+ """Map FunctionResultContent to structured event."""
380
+ return ResponseFunctionResultComplete(
381
+ type="response.function_result.complete",
382
+ data={
383
+ "call_id": getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
384
+ "result": getattr(content, "result", None),
385
+ "status": "completed" if not getattr(content, "exception", None) else "failed",
386
+ "exception": str(getattr(content, "exception", None)) if getattr(content, "exception", None) else None,
387
+ "timestamp": datetime.now().isoformat(),
388
+ },
389
+ call_id=getattr(content, "call_id", f"call_{uuid.uuid4().hex[:8]}"),
390
+ item_id=context["item_id"],
391
+ output_index=context["output_index"],
392
+ sequence_number=self._next_sequence(context),
393
+ )
394
+
395
+ async def _map_error_content(self, content: Any, context: dict[str, Any]) -> ResponseErrorEvent:
396
+ """Map ErrorContent to ResponseErrorEvent."""
397
+ return ResponseErrorEvent(
398
+ type="error",
399
+ message=getattr(content, "message", "Unknown error"),
400
+ code=getattr(content, "error_code", None),
401
+ param=None,
402
+ sequence_number=self._next_sequence(context),
403
+ )
404
+
405
+ async def _map_usage_content(self, content: Any, context: dict[str, Any]) -> ResponseUsageEventComplete:
406
+ """Map UsageContent to structured usage event."""
407
+ # Store usage data in context for aggregation
408
+ if "usage_data" not in context:
409
+ context["usage_data"] = []
410
+ context["usage_data"].append(content)
411
+
412
+ return ResponseUsageEventComplete(
413
+ type="response.usage.complete",
414
+ data={
415
+ "usage_data": getattr(content, "usage_data", {}),
416
+ "total_tokens": getattr(content, "total_tokens", 0),
417
+ "completion_tokens": getattr(content, "completion_tokens", 0),
418
+ "prompt_tokens": getattr(content, "prompt_tokens", 0),
419
+ "timestamp": datetime.now().isoformat(),
420
+ },
421
+ item_id=context["item_id"],
422
+ output_index=context["output_index"],
423
+ sequence_number=self._next_sequence(context),
424
+ )
425
+
426
+ async def _map_data_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
427
+ """Map DataContent to structured trace event."""
428
+ return ResponseTraceEventComplete(
429
+ type="response.trace.complete",
430
+ data={
431
+ "content_type": "data",
432
+ "data": getattr(content, "data", None),
433
+ "mime_type": getattr(content, "mime_type", "application/octet-stream"),
434
+ "size_bytes": len(str(getattr(content, "data", ""))) if getattr(content, "data", None) else 0,
435
+ "timestamp": datetime.now().isoformat(),
436
+ },
437
+ item_id=context["item_id"],
438
+ output_index=context["output_index"],
439
+ sequence_number=self._next_sequence(context),
440
+ )
441
+
442
+ async def _map_uri_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
443
+ """Map UriContent to structured trace event."""
444
+ return ResponseTraceEventComplete(
445
+ type="response.trace.complete",
446
+ data={
447
+ "content_type": "uri",
448
+ "uri": getattr(content, "uri", ""),
449
+ "mime_type": getattr(content, "mime_type", "text/plain"),
450
+ "timestamp": datetime.now().isoformat(),
451
+ },
452
+ item_id=context["item_id"],
453
+ output_index=context["output_index"],
454
+ sequence_number=self._next_sequence(context),
455
+ )
456
+
457
+ async def _map_hosted_file_content(self, content: Any, context: dict[str, Any]) -> ResponseTraceEventComplete:
458
+ """Map HostedFileContent to structured trace event."""
459
+ return ResponseTraceEventComplete(
460
+ type="response.trace.complete",
461
+ data={
462
+ "content_type": "hosted_file",
463
+ "file_id": getattr(content, "file_id", "unknown"),
464
+ "timestamp": datetime.now().isoformat(),
465
+ },
466
+ item_id=context["item_id"],
467
+ output_index=context["output_index"],
468
+ sequence_number=self._next_sequence(context),
469
+ )
470
+
471
+ async def _map_hosted_vector_store_content(
472
+ self, content: Any, context: dict[str, Any]
473
+ ) -> ResponseTraceEventComplete:
474
+ """Map HostedVectorStoreContent to structured trace event."""
475
+ return ResponseTraceEventComplete(
476
+ type="response.trace.complete",
477
+ data={
478
+ "content_type": "hosted_vector_store",
479
+ "vector_store_id": getattr(content, "vector_store_id", "unknown"),
480
+ "timestamp": datetime.now().isoformat(),
481
+ },
482
+ item_id=context["item_id"],
483
+ output_index=context["output_index"],
484
+ sequence_number=self._next_sequence(context),
485
+ )
486
+
487
+ async def _map_approval_request_content(self, content: Any, context: dict[str, Any]) -> dict[str, Any]:
488
+ """Map FunctionApprovalRequestContent to custom event."""
489
+ return {
490
+ "type": "response.function_approval.requested",
491
+ "request_id": getattr(content, "id", "unknown"),
492
+ "function_call": {
493
+ "id": getattr(content.function_call, "call_id", "") if hasattr(content, "function_call") else "",
494
+ "name": getattr(content.function_call, "name", "") if hasattr(content, "function_call") else "",
495
+ "arguments": getattr(content.function_call, "arguments", {})
496
+ if hasattr(content, "function_call")
497
+ else {},
498
+ },
499
+ "item_id": context["item_id"],
500
+ "output_index": context["output_index"],
501
+ "sequence_number": self._next_sequence(context),
502
+ }
503
+
504
+ async def _map_approval_response_content(self, content: Any, context: dict[str, Any]) -> dict[str, Any]:
505
+ """Map FunctionApprovalResponseContent to custom event."""
506
+ return {
507
+ "type": "response.function_approval.responded",
508
+ "request_id": getattr(content, "request_id", "unknown"),
509
+ "approved": getattr(content, "approved", False),
510
+ "item_id": context["item_id"],
511
+ "output_index": context["output_index"],
512
+ "sequence_number": self._next_sequence(context),
513
+ }
514
+
515
+ # Helper methods
516
+
517
+ def _create_text_delta_event(self, text: str, context: dict[str, Any]) -> ResponseTextDeltaEvent:
518
+ """Create a ResponseTextDeltaEvent."""
519
+ return ResponseTextDeltaEvent(
520
+ type="response.output_text.delta",
521
+ item_id=context["item_id"],
522
+ output_index=context["output_index"],
523
+ content_index=context["content_index"],
524
+ delta=text,
525
+ sequence_number=self._next_sequence(context),
526
+ logprobs=[],
527
+ )
528
+
529
+ async def _create_error_event(self, message: str, context: dict[str, Any]) -> ResponseErrorEvent:
530
+ """Create a ResponseErrorEvent."""
531
+ return ResponseErrorEvent(
532
+ type="error", message=message, code=None, param=None, sequence_number=self._next_sequence(context)
533
+ )
534
+
535
+ async def _create_unknown_event(self, event_data: Any, context: dict[str, Any]) -> ResponseStreamEvent:
536
+ """Create event for unknown event types."""
537
+ text = f"Unknown event: {event_data!s}\\n"
538
+ return self._create_text_delta_event(text, context)
539
+
540
+ async def _create_unknown_content_event(self, content: Any, context: dict[str, Any]) -> ResponseStreamEvent:
541
+ """Create event for unknown content types."""
542
+ content_type = content.__class__.__name__
543
+ text = f"⚠️ Unknown content type: {content_type}\\n"
544
+ return self._create_text_delta_event(text, context)
545
+
546
+ def _chunk_json_string(self, json_str: str, chunk_size: int = 50) -> list[str]:
547
+ """Chunk JSON string for streaming."""
548
+ return [json_str[i : i + chunk_size] for i in range(0, len(json_str), chunk_size)]
549
+
550
+ async def _create_error_response(self, error_message: str, request: AgentFrameworkRequest) -> OpenAIResponse:
551
+ """Create error response."""
552
+ error_text = f"Error: {error_message}"
553
+
554
+ response_output_text = ResponseOutputText(type="output_text", text=error_text, annotations=[])
555
+
556
+ response_output_message = ResponseOutputMessage(
557
+ type="message",
558
+ role="assistant",
559
+ content=[response_output_text],
560
+ id=f"msg_{uuid.uuid4().hex[:8]}",
561
+ status="completed",
562
+ )
563
+
564
+ usage = ResponseUsage(
565
+ input_tokens=0,
566
+ output_tokens=0,
567
+ total_tokens=0,
568
+ input_tokens_details=InputTokensDetails(cached_tokens=0),
569
+ output_tokens_details=OutputTokensDetails(reasoning_tokens=0),
570
+ )
571
+
572
+ return OpenAIResponse(
573
+ id=f"resp_{uuid.uuid4().hex[:12]}",
574
+ object="response",
575
+ created_at=datetime.now().timestamp(),
576
+ model=request.model,
577
+ output=[response_output_message],
578
+ usage=usage,
579
+ parallel_tool_calls=False,
580
+ tool_choice="none",
581
+ tools=[],
582
+ )