agent-dev-cli 0.0.1b251223__py3-none-any.whl

@@ -0,0 +1,302 @@
+ import time
+ from typing import Callable, Type, Any
+ from functools import wraps
+ import uuid
+ from datetime import datetime
+ import sys
+ import logging
+
+ from agent_framework import WorkflowEvent, WorkflowStartedEvent, WorkflowFailedEvent, WorkflowStatusEvent, WorkflowRunState, ExecutorEvent, AgentRunUpdateEvent, ExecutorInvokedEvent, ExecutorCompletedEvent, AgentRunResponseUpdate, FunctionCallContent, FunctionResultContent, TextContent
+
+ from ._utils import serialize_data
+ from .structs.request import AgentFrameworkRequest
+
+
+ # Holds common data that is accessed and stored across different events
+ class MapperContext:
+     # Input
+     request: AgentFrameworkRequest
+
+     # Generated, global to the whole response
+     response_id: str | None = None
+     response_created_at: float | None = None
+
+     # The last seen item id
+     item_id: str | None = None
+     output_index: int = 0
+
+     # The last seen call_id for function calling
+     call_id: str | None = None
+
+
+ def event_mapper(event_type: Type[Any]) -> Callable:
+     """Decorator to register an event mapper method for a specific event type.
+
+     Args:
+         event_type: The event type class this mapper handles
+
+     Returns:
+         Decorator function that registers the mapper
+     """
+     def decorator(func: Callable) -> Callable:
+         @wraps(func)
+         def wrapper(self: 'EventMapper', ctx: MapperContext, event: Any) -> list[dict]:
+             return func(self, ctx, event)
+
+         # Store the event type on the wrapper so EventMapper.__init__ can register it
+         wrapper._event_type = event_type  # type: ignore
+         return wrapper
+
+     return decorator
+
+
+ class EventMapper:
+
+     def __init__(self):
+         """Initialize the EventMapper and register all decorated mapper methods."""
+         self._event_mappers = {}
+
+         # Scan all methods for event_mapper decorators and register them
+         for attr_name in dir(self):
+             attr = getattr(self, attr_name)
+             if callable(attr) and hasattr(attr, '_event_type'):
+                 self._event_mappers[attr._event_type] = attr
+
+     def _get_event_mapper(self, event_type: Type[Any]) -> Callable | None:
+         # Direct lookup for an exact type match
+         if event_type in self._event_mappers:
+             return self._event_mappers[event_type]
+
+         # Fall back to subclass matching so derived event types reuse base mappers
+         for registered_type, mapper in self._event_mappers.items():
+             try:
+                 if isinstance(event_type, type) and issubclass(event_type, registered_type):
+                     return mapper
+             except TypeError:
+                 # Not a class type, skip
+                 pass
+
+         return None
+
+     @event_mapper(WorkflowEvent)
+     def _map_workflow_event(self, ctx: MapperContext, event: WorkflowEvent) -> list[dict]:
+         if isinstance(event, WorkflowStartedEvent):
+             ctx.response_id = str(uuid.uuid4())
+             ctx.response_created_at = time.time()
+
+         event_type = None
+         if isinstance(event, WorkflowStartedEvent):
+             event_type = "response.created"
+         elif isinstance(event, WorkflowFailedEvent):
+             return [{
+                 "type": "response.failed",
+                 "response": {
+                     "id": ctx.response_id,
+                     "created_at": ctx.response_created_at,
+                     "model": ctx.request.model,
+                     "error": {
+                         "code": event.details.error_type,
+                         "message": event.details.message,
+                         "traceback": event.details.traceback,
+                     },
+                 },
+             }]
+         elif isinstance(event, WorkflowStatusEvent):
+             if event.state == WorkflowRunState.IN_PROGRESS:
+                 event_type = "response.in_progress"
+
+         if event_type:
+             return [{
+                 "type": event_type,
+                 "response": {
+                     "id": ctx.response_id,
+                     "created_at": ctx.response_created_at,
+                     "model": ctx.request.model,
+                 },
+             }]
+
+         return []
+
+     @event_mapper(AgentRunUpdateEvent)
+     def _map_agent_run_update_event(self, ctx: MapperContext, event: AgentRunUpdateEvent) -> list[dict]:
+         return [
+             {
+                 "type": "response.output_text.delta",
+                 "content_index": 0,
+                 "delta": event.data.text,
+                 "item_id": f"{ctx.item_id}_message",
+                 "logprobs": [],
+                 "output_index": ctx.output_index,
+             }
+         ]
+
+     @event_mapper(ExecutorEvent)
+     def _map_executor_event(self, ctx: MapperContext, event: ExecutorEvent) -> list[dict]:
+         if isinstance(event, ExecutorInvokedEvent):
+             ctx.item_id = f"exec_{event.executor_id}_{str(uuid.uuid4())[:8]}"
+             ctx.output_index += 1
+             event_type = "response.output_item.added"
+             status = "in_progress"
+         elif isinstance(event, ExecutorCompletedEvent):
+             event_type = "response.output_item.done"
+             status = "completed"
+         else:
+             return []
+
+         events = []
+
+         # Serialize event data for frontend display
+         serialized_data = None
+         if event.data is not None:
+             try:
+                 serialized_data = serialize_data(event.data)
+                 logging.debug(f"[Executor] {event.executor_id} serialized data: {serialized_data}")
+             except Exception as e:
+                 logging.warning(f"Failed to serialize event data: {e}")
+                 serialized_data = str(event.data)
+
+         # Build the executor item with input/output data based on the event type
+         executor_item: dict[str, Any] = {
+             "type": "executor_action",
+             "id": ctx.item_id,
+             "executor_id": event.executor_id,
+             "status": status,
+         }
+
+         # ExecutorInvokedEvent carries input data; ExecutorCompletedEvent carries output data
+         if isinstance(event, ExecutorInvokedEvent):
+             executor_item["input"] = serialized_data
+         elif isinstance(event, ExecutorCompletedEvent):
+             executor_item["output"] = serialized_data
+
+         events.append(
+             # Agent Framework extended executor item
+             {
+                 "type": event_type,
+                 "output_index": ctx.output_index,
+                 "item": executor_item,
+             }
+         )
+         events.append(
+             # Standard OpenAI message item
+             {
+                 "type": event_type,
+                 "output_index": ctx.output_index,
+                 "item": {
+                     "type": "message",
+                     "id": f"{ctx.item_id}_message",
+                     "status": status,
+                     "role": "assistant",
+                     "content": [],
+                 },
+             }
+         )
+
+         # OpenAI standard content part added event
+         if isinstance(event, ExecutorInvokedEvent):
+             events.append({
+                 "type": "response.content_part.added",
+                 "output_index": ctx.output_index,
+                 "item_id": f"{ctx.item_id}_message",
+                 "content_index": 0,
+                 "part": {
+                     "text": "",
+                     "type": "output_text",
+                 },
+             })
+         return events
+
+     @event_mapper(AgentRunResponseUpdate)
+     def _map_agent_run_response_update(self, ctx: MapperContext, event: AgentRunResponseUpdate) -> list[dict]:
+         item_id = event.message_id
+         results = []
+         if item_id and item_id != ctx.item_id:
+             ctx.item_id = item_id
+             ctx.output_index += 1
+             results.append({
+                 "type": "response.output_item.added",
+                 "output_index": ctx.output_index,
+                 "item": {
+                     "type": "message",
+                     "id": ctx.item_id,
+                     "content": [],
+                     "role": "assistant",
+                     "status": "in_progress",
+                 },
+             })
+             results.append({
+                 "type": "response.content_part.added",
+                 "output_index": ctx.output_index,
+                 "content_index": 0,
+                 "item_id": ctx.item_id,
+                 "part": {
+                     "type": "output_text",
+                     "text": "",
+                 },
+             })
+
+         for content in event.contents:
+             if isinstance(content, FunctionCallContent):
+                 # OpenAI always streams function-call arguments, but Agent Framework does not.
+                 # Argument streaming uses the last seen call_id to track the function call instance.
+                 if ctx.call_id and content.arguments:
+                     results.append({
+                         "type": "response.function_call_arguments.delta",
+                         "output_index": ctx.output_index,
+                         # DevUI sets OpenAI's item_id to the value of Agent Framework's call_id, which might be a bug
+                         "item_id": ctx.call_id,
+                         "delta": content.arguments,
+                     })
+                 elif content.call_id != ctx.call_id:
+                     ctx.call_id = content.call_id
+                     ctx.output_index += 1
+                     results.append({
+                         "type": "response.output_item.added",
+                         "output_index": ctx.output_index,
+                         "item": {
+                             "type": "function_call",
+                             "arguments": "",
+                             "call_id": ctx.call_id,
+                             "name": content.name,
+                             "id": content.call_id,
+                             "status": "in_progress",
+                         },
+                     })
+             elif isinstance(content, FunctionResultContent):
+                 results.append({
+                     "type": "response.function_result.complete",
+                     "call_id": content.call_id,
+                     "output_index": ctx.output_index,
+                     "output": content.result,
+                     "status": "completed",
+                     "item_id": ctx.item_id,
+                     "timestamp": datetime.now().isoformat(),
+                 })
+             elif isinstance(content, TextContent):
+                 results.append(
+                     {
+                         "type": "response.output_text.delta",
+                         "content_index": 0,
+                         "delta": content.text,
+                         "item_id": ctx.item_id,
+                         "output_index": ctx.output_index,
+                     }
+                 )
+             else:
+                 print("Unknown content: " + str(type(content)), file=sys.stderr)
+         return results
+
+     def map_event(self, ctx: MapperContext, event: Any) -> list[dict]:
+         """Map an Agent Framework event to OpenAI Responses API events."""
+         mapper = self._get_event_mapper(type(event))
+         if not mapper:
+             print("Unknown event: " + str(type(event)), file=sys.stderr)
+             return []
+
+         return mapper(ctx, event)
+
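
For context, a minimal sketch of how this mapper is meant to be driven. The loop below is illustrative only: `events` and the request values are assumptions, while EventMapper, MapperContext, and AgentFrameworkRequest come from the code above.

    async def drive(events, request):
        # events: an assumed async iterable of Agent Framework events
        ctx = MapperContext()
        ctx.request = request
        mapper = EventMapper()
        async for event in events:
            for openai_event in mapper.map_event(ctx, event):
                print(openai_event)  # each dict is one OpenAI Responses API payload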
@@ -0,0 +1,262 @@
+ """
+ TestTool server setup module.
+
+ This module provides functionality to set up test tool endpoints
+ for agent servers.
+ """
+ from typing import Any, AsyncGenerator
+ import json
+ import uuid
+ import asyncio
+ import logging
+
+ from starlette.applications import Starlette
+ from starlette.requests import Request
+ from starlette.responses import JSONResponse, StreamingResponse
+ from starlette.routing import Route, WebSocketRoute
+ from starlette.websockets import WebSocket, WebSocketDisconnect
+
+ from agent_framework import Workflow, ChatMessage, AgentProtocol, Executor
+
+ from .event_mapper import EventMapper, MapperContext
+ from .structs.request import AgentFrameworkRequest
+ from .structs.entity_response import EntityResponse
+ from ._conversations import ConversationStore, InMemoryConversationStore
+ from .code_analyzer import get_executor_location
+ from .errors import ExecutorInputNotSupported
+
+
+ class TestToolServer:
+     """Server class for mounting test tool endpoints."""
+     _entities: list[Workflow | AgentProtocol]
+     _events_mapper: EventMapper
+     _conversation_store: ConversationStore
+     _connection_id: str
+
+     def __init__(self, entities: list[Workflow | AgentProtocol]):
+         self._events_mapper = EventMapper()
+         self._entities = entities
+         self._conversation_store = InMemoryConversationStore()
+         self._connection_id = str(uuid.uuid4())
+
+     def convert_input_data(self, input_data: dict | list[dict], expected_type: type) -> Any:
+         """Convert input data (always a ChatMessage-like dict) to the entity's expected input type (defined by the user app)."""
+         if expected_type == list[ChatMessage]:
+             if isinstance(input_data, list):
+                 return [self.openai_chat_message_to_agent_framework_chat_message(item) for item in input_data]
+             return [self.openai_chat_message_to_agent_framework_chat_message(input_data)]
+         elif expected_type == ChatMessage:
+             return self.openai_chat_message_to_agent_framework_chat_message(input_data if isinstance(input_data, dict) else input_data[0])
+         else:
+             raise ExecutorInputNotSupported(f"Unsupported input type for conversion: {expected_type}")
+
+     def openai_chat_message_to_agent_framework_chat_message(self, message: dict) -> ChatMessage:
+         """Convert an OpenAI chat message dict to an Agent Framework ChatMessage."""
+         role = message.get("role")
+         text = message.get("text")
+         if text:
+             return ChatMessage(role=role, text=text)
+         contents = []
+         for c in message.get("content") or []:
+             if isinstance(c, dict) and c["type"] == "input_text":
+                 contents.append({
+                     "type": "text",
+                     "text": c["text"],
+                 })
+             # TODO: support other content types like images, files, tool calls, etc.
+         return ChatMessage(role=role, contents=contents)
+
+     def mount_backend(self, root_app: Starlette):
+         app = Starlette(routes=[
+             # Responses API for workflows and agents
+             Route("/v1/responses", endpoint=self.responses, methods=["POST"]),
+             Route("/entities", endpoint=self.list_entities, methods=["GET"]),
+             Route("/entities/{entity_id}/info", endpoint=self.get_entity_info, methods=["GET"]),
+             Route("/entities/{entity_id}/executor/{executor_id}/location", endpoint=self.get_executor_location, methods=["GET"]),
+             Route("/conversations", endpoint=self.list_conversations, methods=["GET"]),
+             Route("/conversations", endpoint=self.create_conversation, methods=["POST"]),
+             WebSocketRoute("/ws/health", endpoint=self.websocket_health),
+         ])
+         root_app.mount("/agentdev/", app)
+
+     async def list_entities(self, raw_request: Request):
+         entities_info = []
+         for entity in self._entities:
+             entity_info = EntityResponse.from_agent_framework(entity)
+             entities_info.append(entity_info.model_dump())
+         return JSONResponse({"entities": entities_info})
+
+     async def get_entity_info(self, raw_request: Request):
+         entity_id = raw_request.path_params["entity_id"]
+         entity = self._get_entity(entity_id)
+         if not entity:
+             return JSONResponse({"error": "Entity not found"}, status_code=404)
+
+         return JSONResponse(EntityResponse.from_agent_framework(entity).model_dump())
+
+     async def get_executor_location(self, raw_request: Request):
+         entity_id = raw_request.path_params["entity_id"]
+         executor_id = raw_request.path_params["executor_id"]
+
+         entity = self._get_entity(entity_id)
+         if not entity:
+             return JSONResponse({"error": "Entity not found"}, status_code=404)
+
+         # Only workflows have executors
+         if not isinstance(entity, Workflow):
+             return JSONResponse({"error": "Entity is not a workflow"}, status_code=400)
+
+         # Find the executor in the workflow
+         executor = self._find_executor(entity, executor_id)
+         if not executor:
+             return JSONResponse({"error": "Executor not found"}, status_code=404)
+
+         location = get_executor_location(executor)
+         if location is None:
+             return JSONResponse({"error": f"Could not determine executor location for {executor_id}, type={type(executor)}"}, status_code=400)
+         return JSONResponse({
+             "file_path": location.file_path,
+             "line_number": location.line_number,
+         })
+
+     async def list_conversations(self, raw_request: Request):
+         items = self._conversation_store.list_conversations_by_metadata(metadata_filter=raw_request.query_params)
+         return JSONResponse({
+             "object": "list",
+             "data": items,
+             # For simplicity, conversation listing does not support pagination for now
+             "has_more": False,
+         })
+
+     async def create_conversation(self, raw_request: Request):
+         request_data = await raw_request.json()
+         conversation = self._conversation_store.create_conversation(metadata=request_data.get("metadata", {}))
+         return JSONResponse(conversation.model_dump())
+
+     async def responses(self, raw_request: Request):
+         raw_data = await raw_request.json()
+         request = AgentFrameworkRequest(**raw_data)
+
+         entity = self._get_entity(request.get_entity_id())
+         if not entity:
+             return JSONResponse({"error": "Model not found"}, status_code=404)
+
+         return StreamingResponse(
+             self._stream_execution(entity, request),
+             media_type="text/event-stream",
+             headers={
+                 "Cache-Control": "no-cache",
+                 "Connection": "keep-alive",
+                 "Access-Control-Allow-Origin": "*",
+             },
+         )
+
+     async def _stream_execution(self, entity: Workflow | AgentProtocol, request: AgentFrameworkRequest) -> AsyncGenerator[str, None]:
+         if not isinstance(entity, Workflow):
+             # Agent execution requires a conversation for history tracking
+             conversation_id = request.get_conversation_id()
+             if not conversation_id:
+                 raise RuntimeError("Conversation ID must be provided for agent execution")
+
+         # Extract input from the request
+         input_raw = request.input
+         if not isinstance(input_raw, (dict, list)):
+             raise RuntimeError("Only dict or list input is supported by the test tool server for now")
+
+         if isinstance(entity, Workflow):
+             # Try each input type the start executor accepts until one converts
+             input_data = None
+             for input_type in entity.get_start_executor().input_types:
+                 try:
+                     input_data = self.convert_input_data(input_raw, input_type)
+                     break
+                 except ExecutorInputNotSupported:
+                     continue
+             if input_data is None:
+                 raise ExecutorInputNotSupported("No supported input type found for workflow start executor, start executor input types: " + str(entity.get_start_executor().input_types))
+         else:
+             # TODO: fetch conversation history from the conversation store by ID
+             # Agents always support multiple input types, including list[ChatMessage]
+             input_data = self.convert_input_data(input_raw, list[ChatMessage])
+
+         ctx = MapperContext()
+         ctx.request = request
+         try:
+             async for agent_framework_event in entity.run_stream(input_data):
+                 if agent_framework_event and hasattr(agent_framework_event, "to_json"):
+                     logging.debug("Emit agent framework event: %s", agent_framework_event.to_json())
+                 else:
+                     logging.debug("Emit agent framework event: %s", agent_framework_event)
+                 openai_events = self._events_mapper.map_event(ctx, agent_framework_event)
+                 for openai_event in openai_events:
+                     if openai_event:
+                         payload = json.dumps(openai_event)
+                         yield f"data: {payload}\n\n"
+         finally:
+             yield "data: [DONE]\n\n"
+
+     def _get_entity(self, model_name: str) -> Workflow | AgentProtocol | None:
+         # Entity ids are unique, so the first match (if any) is the right one
+         results = list(filter(lambda item: item.id == model_name, self._entities))
+         if not results:
+             return None
+         return results[0]
+
+     def _find_executor(self, workflow: Workflow, executor_id: str) -> Executor | None:
+         # Search for the executor with a matching ID
+         for name in workflow.executors:
+             if name == executor_id:
+                 return workflow.executors[name]
+         return None
+
+     def _get_entity_start_executor_types(self, entity: Workflow) -> Any:
+         return entity.get_start_executor().input_types[0]
+
+     async def websocket_health(self, websocket: WebSocket):
+         """WebSocket endpoint for health checks and connection monitoring."""
+         await websocket.accept()
+         try:
+             # Send initial connection info with a unique connection ID
+             await websocket.send_json({
+                 "type": "connected",
+                 "connection_id": self._connection_id,
+                 "timestamp": asyncio.get_running_loop().time(),
+             })
+
+             # Keep the connection alive with periodic pings
+             while True:
+                 try:
+                     # Wait for a ping from the client, or send a periodic health check
+                     data = await asyncio.wait_for(websocket.receive_text(), timeout=30.0)
+                     if data == "ping":
+                         await websocket.send_json({
+                             "type": "pong",
+                             "connection_id": self._connection_id,
+                             "timestamp": asyncio.get_running_loop().time(),
+                         })
+                 except asyncio.TimeoutError:
+                     # Send a periodic health check
+                     await websocket.send_json({
+                         "type": "health",
+                         "connection_id": self._connection_id,
+                         "timestamp": asyncio.get_running_loop().time(),
+                     })
+         except WebSocketDisconnect:
+             pass
+         except Exception as e:
+             logging.error(f"WebSocket error: {e}")
+             try:
+                 await websocket.close()
+             except Exception:
+                 pass
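
A quick way to exercise the /v1/responses endpoint once the backend is mounted. This client sketch assumes the root app is served at http://localhost:8000 and that an entity with id "my_workflow" is registered (both hypothetical); it uses httpx to consume the SSE stream:

    import json
    import httpx

    payload = {
        "model": "my_workflow",  # entity id, per AgentFrameworkRequest
        "input": [{"role": "user", "content": [{"type": "input_text", "text": "hello"}]}],
        "stream": True,
    }
    with httpx.stream("POST", "http://localhost:8000/agentdev/v1/responses", json=payload, timeout=None) as resp:
        for line in resp.iter_lines():
            # Each event arrives as a "data: {...}" line; the stream ends with "data: [DONE]"
            if line.startswith("data: ") and line != "data: [DONE]":
                print(json.loads(line[len("data: "):])["type"])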
@@ -0,0 +1,10 @@
+ """
+ agentdev Package
+
+ A simple package for setting up workflow visualization with agent servers.
+ """
+
+ from .request import AgentFrameworkRequest
+
+ __version__ = "0.1.0"
+ __all__ = ["AgentFrameworkRequest"]
@@ -0,0 +1,30 @@
+ from agent_framework import AgentProtocol, Workflow
+ from pydantic import BaseModel
+
+
+ class EntityResponse(BaseModel):
+
+     @classmethod
+     def from_agent_framework(cls, entity: AgentProtocol | Workflow) -> "EntityResponse":
+         return EntityResponse(
+             id=entity.id,
+             type="workflow" if isinstance(entity, Workflow) else "agent",
+             name=entity.name or "",
+             description=entity.description or "",
+             framework="agent_framework",
+             executors=list(entity.executors.keys()) if isinstance(entity, Workflow) else [],
+             start_executor_id=entity.get_start_executor().id if isinstance(entity, Workflow) else None,
+             workflow_dump=entity.to_dict() if isinstance(entity, Workflow) else None,
+             input_schema={"type": "object"},
+         )
+
+     id: str
+     type: str
+     name: str
+     description: str
+     framework: str
+     metadata: dict = {}
+     executors: list[str]
+     start_executor_id: str | None
+     workflow_dump: dict | None = None
+     input_schema: dict | None
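
For reference, the per-entity JSON shape that /entities and /entities/{entity_id}/info return; the field values below are hypothetical, for an agent (non-workflow) entity:

    # Hypothetical result of EntityResponse.from_agent_framework(agent).model_dump()
    {
        "id": "my_agent",
        "type": "agent",
        "name": "My Agent",
        "description": "",
        "framework": "agent_framework",
        "metadata": {},
        "executors": [],
        "start_executor_id": None,
        "workflow_dump": None,
        "input_schema": {"type": "object"},
    }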
@@ -0,0 +1,57 @@
+ from pydantic import BaseModel, ConfigDict
+ from typing import Any
+
+
+ # Agent Framework request model, extending the real OpenAI types
+ class AgentFrameworkRequest(BaseModel):
+     """OpenAI ResponseCreateParams with Agent Framework routing.
+
+     This properly extends the real OpenAI API request format.
+     - Uses the 'model' field as entity_id (agent/workflow name)
+     - Uses the 'conversation' field for conversation context (OpenAI standard)
+     """
+
+     # All OpenAI fields from ResponseCreateParams
+     model: str  # Used as entity_id
+     input: str | list[Any] | dict[str, Any]  # ResponseInputParam, plus dict for workflow structured input
+     stream: bool | None = False
+
+     # OpenAI conversation parameter (standard!)
+     conversation: str | dict[str, Any] | None = None  # Union[str, {"id": str}]
+
+     # Common OpenAI optional fields
+     instructions: str | None = None
+     metadata: dict[str, Any] | None = None
+     temperature: float | None = None
+     max_output_tokens: int | None = None
+     tools: list[dict[str, Any]] | None = None
+
+     # Optional extra_body for advanced use cases
+     extra_body: dict[str, Any] | None = None
+
+     model_config = ConfigDict(extra="allow")
+
+     def get_entity_id(self) -> str:
+         """Get the entity_id from the model field.
+
+         model IS the entity_id (agent/workflow name).
+         """
+         return self.model
+
+     def get_conversation_id(self) -> str | None:
+         """Extract conversation_id from the conversation parameter.
+
+         Supports both string and object forms:
+         - conversation: "conv_123"
+         - conversation: {"id": "conv_123"}
+         """
+         if isinstance(self.conversation, str):
+             return self.conversation
+         if isinstance(self.conversation, dict):
+             return self.conversation.get("id")
+         return None
+
+     def to_openai_params(self) -> dict[str, Any]:
+         """Convert to a dict for OpenAI client compatibility."""
+         return self.model_dump(exclude_none=True)
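
Both accepted forms of the conversation parameter resolve to the same id; a small sanity check (using the "conv_123" value from the docstring):

    req_a = AgentFrameworkRequest(model="my_agent", input="hi", conversation="conv_123")
    req_b = AgentFrameworkRequest(model="my_agent", input="hi", conversation={"id": "conv_123"})
    assert req_a.get_conversation_id() == req_b.get_conversation_id() == "conv_123"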