agent-framework-devui 1.0.0b251007__py3-none-any.whl → 1.0.0b251028__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This version of agent-framework-devui has been flagged as potentially problematic; see the registry listing for details.
- agent_framework_devui/_conversations.py +473 -0
- agent_framework_devui/_discovery.py +295 -325
- agent_framework_devui/_executor.py +115 -246
- agent_framework_devui/_mapper.py +747 -88
- agent_framework_devui/_server.py +275 -240
- agent_framework_devui/_utils.py +150 -1
- agent_framework_devui/models/__init__.py +21 -10
- agent_framework_devui/models/_discovery_models.py +1 -2
- agent_framework_devui/models/_openai_custom.py +103 -83
- agent_framework_devui/ui/assets/index-CE4pGoXh.css +1 -0
- agent_framework_devui/ui/assets/index-D_Y1oSGu.js +577 -0
- agent_framework_devui/ui/index.html +2 -2
- agent_framework_devui-1.0.0b251028.dist-info/METADATA +321 -0
- agent_framework_devui-1.0.0b251028.dist-info/RECORD +23 -0
- agent_framework_devui/ui/assets/index-D0SfShuZ.js +0 -445
- agent_framework_devui/ui/assets/index-WsCIE0bH.css +0 -1
- agent_framework_devui-1.0.0b251007.dist-info/METADATA +0 -172
- agent_framework_devui-1.0.0b251007.dist-info/RECORD +0 -22
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/WHEEL +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/entry_points.txt +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/licenses/LICENSE +0 -0
agent_framework_devui/_utils.py
CHANGED
```diff
@@ -6,10 +6,159 @@ import inspect
 import json
 import logging
 from dataclasses import fields, is_dataclass
-from
+from types import UnionType
+from typing import Any, Union, get_args, get_origin
+
+from agent_framework import ChatMessage

 logger = logging.getLogger(__name__)

+# ============================================================================
+# Agent Metadata Extraction
+# ============================================================================
+
+
+def extract_agent_metadata(entity_object: Any) -> dict[str, Any]:
+    """Extract agent-specific metadata from an entity object.
+
+    Args:
+        entity_object: Agent Framework agent object
+
+    Returns:
+        Dictionary with agent metadata: instructions, model, chat_client_type,
+        context_providers, and middleware
+    """
+    metadata = {
+        "instructions": None,
+        "model": None,
+        "chat_client_type": None,
+        "context_providers": None,
+        "middleware": None,
+    }
+
+    # Try to get instructions
+    if hasattr(entity_object, "chat_options") and hasattr(entity_object.chat_options, "instructions"):
+        metadata["instructions"] = entity_object.chat_options.instructions
+
+    # Try to get model - check both chat_options and chat_client
+    if (
+        hasattr(entity_object, "chat_options")
+        and hasattr(entity_object.chat_options, "model_id")
+        and entity_object.chat_options.model_id
+    ):
+        metadata["model"] = entity_object.chat_options.model_id
+    elif hasattr(entity_object, "chat_client") and hasattr(entity_object.chat_client, "model_id"):
+        metadata["model"] = entity_object.chat_client.model_id
+
+    # Try to get chat client type
+    if hasattr(entity_object, "chat_client"):
+        metadata["chat_client_type"] = entity_object.chat_client.__class__.__name__
+
+    # Try to get context providers
+    if (
+        hasattr(entity_object, "context_provider")
+        and entity_object.context_provider
+        and hasattr(entity_object.context_provider, "__class__")
+    ):
+        metadata["context_providers"] = [entity_object.context_provider.__class__.__name__]  # type: ignore
+
+    # Try to get middleware
+    if hasattr(entity_object, "middleware") and entity_object.middleware:
+        middleware_list: list[str] = []
+        for m in entity_object.middleware:
+            # Try multiple ways to get a good name for middleware
+            if hasattr(m, "__name__"):  # Function or callable
+                middleware_list.append(m.__name__)
+            elif hasattr(m, "__class__"):  # Class instance
+                middleware_list.append(m.__class__.__name__)
+            else:
+                middleware_list.append(str(m))
+        metadata["middleware"] = middleware_list  # type: ignore
+
+    return metadata
+
+
+# ============================================================================
+# Workflow Input Type Utilities
+# ============================================================================
+
+
+def extract_executor_message_types(executor: Any) -> list[Any]:
+    """Extract declared input types for the given executor.
+
+    Args:
+        executor: Workflow executor object
+
+    Returns:
+        List of message types that the executor accepts
+    """
+    message_types: list[Any] = []
+
+    try:
+        input_types = getattr(executor, "input_types", None)
+    except Exception as exc:  # pragma: no cover - defensive logging path
+        logger.debug(f"Failed to access executor input_types: {exc}")
+    else:
+        if input_types:
+            message_types = list(input_types)
+
+    if not message_types and hasattr(executor, "_handlers"):
+        try:
+            handlers = executor._handlers
+            if isinstance(handlers, dict):
+                message_types = list(handlers.keys())
+        except Exception as exc:  # pragma: no cover - defensive logging path
+            logger.debug(f"Failed to read executor handlers: {exc}")
+
+    return message_types
+
+
+def _contains_chat_message(type_hint: Any) -> bool:
+    """Check whether the provided type hint directly or indirectly references ChatMessage."""
+    if type_hint is ChatMessage:
+        return True
+
+    origin = get_origin(type_hint)
+    if origin in (list, tuple):
+        return any(_contains_chat_message(arg) for arg in get_args(type_hint))
+
+    if origin in (Union, UnionType):
+        return any(_contains_chat_message(arg) for arg in get_args(type_hint))
+
+    return False
+
+
+def select_primary_input_type(message_types: list[Any]) -> Any | None:
+    """Choose the most user-friendly input type for workflow inputs.
+
+    Prefers ChatMessage (or containers thereof) and then falls back to primitives.
+
+    Args:
+        message_types: List of possible message types
+
+    Returns:
+        Selected primary input type, or None if list is empty
+    """
+    if not message_types:
+        return None
+
+    for message_type in message_types:
+        if _contains_chat_message(message_type):
+            return ChatMessage
+
+    preferred = (str, dict)
+
+    for candidate in preferred:
+        for message_type in message_types:
+            if message_type is candidate:
+                return candidate
+            origin = get_origin(message_type)
+            if origin is candidate:
+                return candidate
+
+    return message_types[0]
+
+
 # ============================================================================
 # Type System Utilities
 # ============================================================================
```
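The new helpers are plain functions, so their behavior is easy to check in isolation. A minimal sketch, assuming `agent_framework` is importable and using the private module path shown in this diff; the fake agent object and model name are purely illustrative:

```python
from types import SimpleNamespace

from agent_framework import ChatMessage
from agent_framework_devui._utils import extract_agent_metadata, select_primary_input_type

# Hypothetical stand-in exposing only the attributes the helper probes for.
fake_agent = SimpleNamespace(
    chat_options=SimpleNamespace(instructions="Answer briefly.", model_id="gpt-4o-mini"),
    chat_client=SimpleNamespace(model_id="gpt-4o-mini"),
)
meta = extract_agent_metadata(fake_agent)
# meta["instructions"] == "Answer briefly."; meta["chat_client_type"] == "SimpleNamespace"

# ChatMessage wins even when it only appears inside a container or union type ...
assert select_primary_input_type([list[ChatMessage], str]) is ChatMessage
# ... otherwise str is preferred, then dict, then the first declared type.
assert select_primary_input_type([int, dict[str, int]]) is dict
```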
agent_framework_devui/models/__init__.py
CHANGED
```diff
@@ -4,11 +4,18 @@

 # Import discovery models
 # Import all OpenAI types directly from the openai package
+from openai.types.conversations import Conversation, ConversationDeletedResource
+from openai.types.conversations.conversation_item import ConversationItem
 from openai.types.responses import (
     Response,
+    ResponseCompletedEvent,
     ResponseErrorEvent,
     ResponseFunctionCallArgumentsDeltaEvent,
+    ResponseFunctionToolCall,
+    ResponseFunctionToolCallOutputItem,
     ResponseInputParam,
+    ResponseOutputItemAddedEvent,
+    ResponseOutputItemDoneEvent,
     ResponseOutputMessage,
     ResponseOutputText,
     ResponseReasoningTextDeltaEvent,
@@ -23,16 +30,14 @@ from openai.types.shared import Metadata, ResponsesModel
 from ._discovery_models import DiscoveryResponse, EntityInfo
 from ._openai_custom import (
     AgentFrameworkRequest,
+    CustomResponseOutputItemAddedEvent,
+    CustomResponseOutputItemDoneEvent,
+    ExecutorActionItem,
     OpenAIError,
     ResponseFunctionResultComplete,
-    ResponseFunctionResultDelta,
     ResponseTraceEvent,
     ResponseTraceEventComplete,
-    ResponseTraceEventDelta,
-    ResponseUsageEventComplete,
-    ResponseUsageEventDelta,
     ResponseWorkflowEventComplete,
-    ResponseWorkflowEventDelta,
 )

 # Type alias for compatibility
@@ -41,19 +46,29 @@ OpenAIResponse = Response
 # Export all types for easy importing
 __all__ = [
     "AgentFrameworkRequest",
+    "Conversation",
+    "ConversationDeletedResource",
+    "ConversationItem",
+    "CustomResponseOutputItemAddedEvent",
+    "CustomResponseOutputItemDoneEvent",
     "DiscoveryResponse",
     "EntityInfo",
+    "ExecutorActionItem",
     "InputTokensDetails",
     "Metadata",
     "OpenAIError",
     "OpenAIResponse",
     "OutputTokensDetails",
     "Response",
+    "ResponseCompletedEvent",
     "ResponseErrorEvent",
     "ResponseFunctionCallArgumentsDeltaEvent",
     "ResponseFunctionResultComplete",
-    "
+    "ResponseFunctionToolCall",
+    "ResponseFunctionToolCallOutputItem",
     "ResponseInputParam",
+    "ResponseOutputItemAddedEvent",
+    "ResponseOutputItemDoneEvent",
     "ResponseOutputMessage",
     "ResponseOutputText",
     "ResponseReasoningTextDeltaEvent",
@@ -61,12 +76,8 @@ __all__ = [
     "ResponseTextDeltaEvent",
     "ResponseTraceEvent",
     "ResponseTraceEventComplete",
-    "ResponseTraceEventDelta",
     "ResponseUsage",
-    "ResponseUsageEventComplete",
-    "ResponseUsageEventDelta",
     "ResponseWorkflowEventComplete",
-    "ResponseWorkflowEventDelta",
     "ResponsesModel",
     "ToolParam",
 ]
```
agent_framework_devui/models/_discovery_models.py
CHANGED
```diff
@@ -31,8 +31,7 @@ class EntityInfo(BaseModel):
     metadata: dict[str, Any] = Field(default_factory=dict)

     # Source information
-    source: str = "directory"  # "directory"
-    original_url: str | None = None
+    source: str = "directory"  # "directory" or "in_memory"

     # Environment variable requirements
     required_env_vars: list[EnvVarRequirement] | None = None
```
agent_framework_devui/models/_openai_custom.py
CHANGED
```diff
@@ -3,11 +3,12 @@
 """Custom OpenAI-compatible event types for Agent Framework extensions.

 These are custom event types that extend beyond the standard OpenAI Responses API
-to support Agent Framework specific features like workflows
+to support Agent Framework specific features like workflows and traces.
 """

 from __future__ import annotations

+from dataclasses import dataclass
 from typing import Any, Literal

 from pydantic import BaseModel, ConfigDict
@@ -15,59 +16,75 @@ from pydantic import BaseModel, ConfigDict
 # Custom Agent Framework OpenAI event types for structured data


-
-
+# Agent lifecycle events - simple and clear
+class AgentStartedEvent:
+    """Event emitted when an agent starts execution."""

-
-    delta: dict[str, Any]
-    executor_id: str | None = None
-    is_complete: bool = False  # Track if this is the final part
-    item_id: str
-    output_index: int = 0
-    sequence_number: int
+    pass


-class
-    """
+class AgentCompletedEvent:
+    """Event emitted when an agent completes execution successfully."""

-
-    data: dict[str, Any]  # Complete event data, not delta
-    executor_id: str | None = None
-    item_id: str
-    output_index: int = 0
-    sequence_number: int
+    pass


-
-
+@dataclass
+class AgentFailedEvent:
+    """Event emitted when an agent fails during execution."""

-
-
-
-
-
-
+    error: Exception | None = None
+
+
+class ExecutorActionItem(BaseModel):
+    """Custom item type for workflow executor actions.
+
+    This is a DevUI-specific extension to represent workflow executors as output items.
+    Since OpenAI's ResponseOutputItemAddedEvent only accepts specific item types,
+    and executor actions are not part of the standard, we need this custom type.
+    """
+
+    type: Literal["executor_action"] = "executor_action"
+    id: str
+    executor_id: str
+    status: Literal["in_progress", "completed", "failed", "cancelled"] = "in_progress"
+    metadata: dict[str, Any] | None = None
+    result: Any | None = None
+    error: dict[str, Any] | None = None
+
+
+class CustomResponseOutputItemAddedEvent(BaseModel):
+    """Custom version of ResponseOutputItemAddedEvent that accepts any item type.
+
+    This allows us to emit executor action items while maintaining the same
+    event structure as OpenAI's standard.
+    """
+
+    type: Literal["response.output_item.added"] = "response.output_item.added"
+    output_index: int
     sequence_number: int
+    item: dict[str, Any] | ExecutorActionItem | Any  # Flexible item type


-class
-    """
+class CustomResponseOutputItemDoneEvent(BaseModel):
+    """Custom version of ResponseOutputItemDoneEvent that accepts any item type.

-
-
-
-
-
+    This allows us to emit executor action items while maintaining the same
+    event structure as OpenAI's standard.
+    """
+
+    type: Literal["response.output_item.done"] = "response.output_item.done"
+    output_index: int
     sequence_number: int
+    item: dict[str, Any] | ExecutorActionItem | Any  # Flexible item type


-class
-    """
+class ResponseWorkflowEventComplete(BaseModel):
+    """Complete workflow event data."""

-    type: Literal["response.
-
-
-    is_complete: bool = False
+    type: Literal["response.workflow_event.complete"] = "response.workflow_event.complete"
+    data: dict[str, Any]  # Complete event data, not delta
+    executor_id: str | None = None
     item_id: str
     output_index: int = 0
     sequence_number: int
```
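Given the item and event models added above (all re-exported from `agent_framework_devui.models`), a workflow executor update could be surfaced to the UI roughly as in this sketch; the executor ID, item ID, and sequence numbers are made up for illustration:

```python
from agent_framework_devui.models import (
    CustomResponseOutputItemAddedEvent,
    CustomResponseOutputItemDoneEvent,
    ExecutorActionItem,
)

# Hypothetical executor/item identifiers; status defaults to "in_progress".
action = ExecutorActionItem(id="item_exec_1", executor_id="summarize_step")
added = CustomResponseOutputItemAddedEvent(output_index=0, sequence_number=3, item=action)

# When the executor finishes, a matching "done" event carries the completed item.
finished = action.model_copy(update={"status": "completed", "result": {"summary": "..."}})
done = CustomResponseOutputItemDoneEvent(output_index=0, sequence_number=9, item=finished)

print(added.type, added.item.executor_id)  # response.output_item.added summarize_step
print(done.type, done.item.status)         # response.output_item.done completed
```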
```diff
@@ -84,25 +101,27 @@ class ResponseTraceEventComplete(BaseModel):
     sequence_number: int


-class
-    """
-
-    type: Literal["response.usage.delta"] = "response.usage.delta"
-    delta: dict[str, Any]
-    is_complete: bool = False
-    item_id: str
-    output_index: int = 0
-    sequence_number: int
+class ResponseFunctionResultComplete(BaseModel):
+    """DevUI extension: Stream function execution results.

+    This is a DevUI extension because:
+    - OpenAI Responses API doesn't stream function results (clients execute functions)
+    - Agent Framework executes functions server-side, so we stream results for debugging visibility
+    - ResponseFunctionToolCallOutputItem exists in OpenAI SDK but isn't in ResponseOutputItem union
+      (it's for Conversations API input, not Responses API streaming output)

-
-
+    This event provides the same structure as OpenAI's function output items but wrapped
+    in a custom event type since standard events don't support streaming function results.
+    """

-    type: Literal["response.
-
+    type: Literal["response.function_result.complete"] = "response.function_result.complete"
+    call_id: str
+    output: str
+    status: Literal["in_progress", "completed", "incomplete"]
     item_id: str
     output_index: int = 0
     sequence_number: int
+    timestamp: str | None = None  # Optional timestamp for UI display


 # Agent Framework extension fields
```
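Based on the fields above, streaming a completed tool result from the server might look like the following sketch; the call ID, item ID, output payload, and timestamp are invented for illustration:

```python
from agent_framework_devui.models import ResponseFunctionResultComplete

event = ResponseFunctionResultComplete(
    call_id="call_abc123",           # ties the result back to the originating tool call
    output='{"temperature_c": 21}',  # function output is carried as a string
    status="completed",
    item_id="item_fn_001",
    sequence_number=7,
    timestamp="2025-10-28T12:00:00Z",
)
# type defaults to "response.function_result.complete", output_index to 0
print(event.model_dump_json())
```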
```diff
@@ -110,25 +129,28 @@ class AgentFrameworkExtraBody(BaseModel):
     """Agent Framework specific routing fields for OpenAI requests."""

     entity_id: str
-
-    input_data: dict[str, Any] | None = None
+    # input_data removed - now using standard input field for all data

     model_config = ConfigDict(extra="allow")


 # Agent Framework Request Model - Extending real OpenAI types
 class AgentFrameworkRequest(BaseModel):
-    """OpenAI ResponseCreateParams with Agent Framework
+    """OpenAI ResponseCreateParams with Agent Framework routing.

-    This properly extends the real OpenAI API request format
-
+    This properly extends the real OpenAI API request format.
+    - Uses 'model' field as entity_id (agent/workflow name)
+    - Uses 'conversation' field for conversation context (OpenAI standard)
     """

     # All OpenAI fields from ResponseCreateParams
-    model: str
-    input: str | list[Any]  # ResponseInputParam
+    model: str  # Used as entity_id in DevUI!
+    input: str | list[Any] | dict[str, Any]  # ResponseInputParam + dict for workflow structured input
     stream: bool | None = False

+    # OpenAI conversation parameter (standard!)
+    conversation: str | dict[str, Any] | None = None  # Union[str, {"id": str}]
+
     # Common OpenAI optional fields
     instructions: str | None = None
     metadata: dict[str, Any] | None = None
@@ -136,32 +158,35 @@ class AgentFrameworkRequest(BaseModel):
     max_output_tokens: int | None = None
     tools: list[dict[str, Any]] | None = None

-    #
-    extra_body:
-
-    entity_id: str | None = None  # Allow entity_id as top-level field
+    # Optional extra_body for advanced use cases
+    extra_body: dict[str, Any] | None = None

     model_config = ConfigDict(extra="allow")

-    def get_entity_id(self) -> str
-        """Get entity_id from
-
-
-
-
-
-
-
+    def get_entity_id(self) -> str:
+        """Get entity_id from model field.
+
+        In DevUI, model IS the entity_id (agent/workflow name).
+        Simple and clean!
+        """
+        return self.model
+
+    def get_conversation_id(self) -> str | None:
+        """Extract conversation_id from conversation parameter.
+
+        Supports both string and object forms:
+        - conversation: "conv_123"
+        - conversation: {"id": "conv_123"}
+        """
+        if isinstance(self.conversation, str):
+            return self.conversation
+        if isinstance(self.conversation, dict):
+            return self.conversation.get("id")
         return None

     def to_openai_params(self) -> dict[str, Any]:
         """Convert to dict for OpenAI client compatibility."""
-
-        if self.extra_body:
-            # Don't merge extra_body into main params to keep them separate
-            data["extra_body"] = self.extra_body
-        return data
+        return self.model_dump(exclude_none=True)


 # Error handling
```
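A short sketch of how a client addresses a DevUI entity with the new request shape; the entity name and conversation ID are illustrative:

```python
from agent_framework_devui.models import AgentFrameworkRequest

request = AgentFrameworkRequest(
    model="weather_agent",            # in DevUI the model field doubles as the entity_id
    input="What's the weather in Berlin?",
    stream=True,
    conversation={"id": "conv_123"},  # the plain string "conv_123" is accepted too
)

assert request.get_entity_id() == "weather_agent"
assert request.get_conversation_id() == "conv_123"
params = request.to_openai_params()   # model_dump(exclude_none=True)
```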
```diff
@@ -198,12 +223,7 @@ __all__ = [
     "AgentFrameworkRequest",
     "OpenAIError",
     "ResponseFunctionResultComplete",
-    "ResponseFunctionResultDelta",
     "ResponseTraceEvent",
     "ResponseTraceEventComplete",
-    "ResponseTraceEventDelta",
-    "ResponseUsageEventComplete",
-    "ResponseUsageEventDelta",
     "ResponseWorkflowEventComplete",
-    "ResponseWorkflowEventDelta",
 ]
```