agent-framework-devui 1.0.0b251007__py3-none-any.whl → 1.0.0b251028__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_framework_devui/_conversations.py +473 -0
- agent_framework_devui/_discovery.py +295 -325
- agent_framework_devui/_executor.py +115 -246
- agent_framework_devui/_mapper.py +747 -88
- agent_framework_devui/_server.py +275 -240
- agent_framework_devui/_utils.py +150 -1
- agent_framework_devui/models/__init__.py +21 -10
- agent_framework_devui/models/_discovery_models.py +1 -2
- agent_framework_devui/models/_openai_custom.py +103 -83
- agent_framework_devui/ui/assets/index-CE4pGoXh.css +1 -0
- agent_framework_devui/ui/assets/index-D_Y1oSGu.js +577 -0
- agent_framework_devui/ui/index.html +2 -2
- agent_framework_devui-1.0.0b251028.dist-info/METADATA +321 -0
- agent_framework_devui-1.0.0b251028.dist-info/RECORD +23 -0
- agent_framework_devui/ui/assets/index-D0SfShuZ.js +0 -445
- agent_framework_devui/ui/assets/index-WsCIE0bH.css +0 -1
- agent_framework_devui-1.0.0b251007.dist-info/METADATA +0 -172
- agent_framework_devui-1.0.0b251007.dist-info/RECORD +0 -22
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/WHEEL +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/entry_points.txt +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251028.dist-info}/licenses/LICENSE +0 -0
agent_framework_devui/_server.py
CHANGED
@@ -7,7 +7,7 @@ import json
 import logging
 from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import Any, get_origin
+from typing import Any
 
 from fastapi import FastAPI, HTTPException, Request
 from fastapi.middleware.cors import CORSMiddleware
@@ -23,47 +23,6 @@ from .models._discovery_models import DiscoveryResponse, EntityInfo
 logger = logging.getLogger(__name__)
 
 
-def _extract_executor_message_types(executor: Any) -> list[Any]:
-    """Return declared input types for the given executor."""
-    message_types: list[Any] = []
-
-    try:
-        input_types = getattr(executor, "input_types", None)
-    except Exception as exc:  # pragma: no cover - defensive logging path
-        logger.debug(f"Failed to access executor input_types: {exc}")
-    else:
-        if input_types:
-            message_types = list(input_types)
-
-    if not message_types and hasattr(executor, "_handlers"):
-        try:
-            handlers = executor._handlers
-            if isinstance(handlers, dict):
-                message_types = list(handlers.keys())
-        except Exception as exc:  # pragma: no cover - defensive logging path
-            logger.debug(f"Failed to read executor handlers: {exc}")
-
-    return message_types
-
-
-def _select_primary_input_type(message_types: list[Any]) -> Any | None:
-    """Choose the most user-friendly input type for rendering workflow inputs."""
-    if not message_types:
-        return None
-
-    preferred = (str, dict)
-
-    for candidate in preferred:
-        for message_type in message_types:
-            if message_type is candidate:
-                return candidate
-            origin = get_origin(message_type)
-            if origin is candidate:
-                return candidate
-
-    return message_types[0]
-
-
 class DevServer:
     """Development Server - OpenAI compatible API server for debugging agents."""
 
@@ -126,19 +85,25 @@ class DevServer:
         return self.executor
 
     async def _cleanup_entities(self) -> None:
-        """Cleanup entity resources (close clients, credentials, etc.)."""
+        """Cleanup entity resources (close clients, MCP tools, credentials, etc.)."""
         if not self.executor:
             return
 
         logger.info("Cleaning up entity resources...")
         entities = self.executor.entity_discovery.list_entities()
         closed_count = 0
+        mcp_tools_closed = 0
+        credentials_closed = 0
 
         for entity_info in entities:
             try:
                 entity_obj = self.executor.entity_discovery.get_entity_object(entity_info.id)
+
+                # Close chat clients and their credentials
                 if entity_obj and hasattr(entity_obj, "chat_client"):
                     client = entity_obj.chat_client
+
+                    # Close the chat client itself
                     if hasattr(client, "close") and callable(client.close):
                         if inspect.iscoroutinefunction(client.close):
                             await client.close()
@@ -146,11 +111,47 @@ class DevServer:
                             client.close()
                         closed_count += 1
                         logger.debug(f"Closed client for entity: {entity_info.id}")
+
+                    # Close credentials attached to chat clients (e.g., AzureCliCredential)
+                    credential_attrs = ["credential", "async_credential", "_credential", "_async_credential"]
+                    for attr in credential_attrs:
+                        if hasattr(client, attr):
+                            cred = getattr(client, attr)
+                            if cred and hasattr(cred, "close") and callable(cred.close):
+                                try:
+                                    if inspect.iscoroutinefunction(cred.close):
+                                        await cred.close()
+                                    else:
+                                        cred.close()
+                                    credentials_closed += 1
+                                    logger.debug(f"Closed credential for entity: {entity_info.id}")
+                                except Exception as e:
+                                    logger.warning(f"Error closing credential for {entity_info.id}: {e}")
+
+                # Close MCP tools (framework tracks them in _local_mcp_tools)
+                if entity_obj and hasattr(entity_obj, "_local_mcp_tools"):
+                    for mcp_tool in entity_obj._local_mcp_tools:
+                        if hasattr(mcp_tool, "close") and callable(mcp_tool.close):
+                            try:
+                                if inspect.iscoroutinefunction(mcp_tool.close):
+                                    await mcp_tool.close()
+                                else:
+                                    mcp_tool.close()
+                                mcp_tools_closed += 1
+                                tool_name = getattr(mcp_tool, "name", "unknown")
+                                logger.debug(f"Closed MCP tool '{tool_name}' for entity: {entity_info.id}")
+                            except Exception as e:
+                                logger.warning(f"Error closing MCP tool for {entity_info.id}: {e}")
+
             except Exception as e:
                 logger.warning(f"Error closing entity {entity_info.id}: {e}")
 
         if closed_count > 0:
             logger.info(f"Closed {closed_count} entity client(s)")
+        if credentials_closed > 0:
+            logger.info(f"Closed {credentials_closed} credential(s)")
+        if mcp_tools_closed > 0:
+            logger.info(f"Closed {mcp_tools_closed} MCP tool(s)")
 
     def create_app(self) -> FastAPI:
         """Create the FastAPI application."""
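The cleanup changes above repeat one pattern for chat clients, credentials, and MCP tools: call `close()` whether it is sync or async, count successes, and log failures without letting them stop the loop. A minimal sketch of that pattern as a standalone helper; `maybe_close` is a hypothetical name for illustration, not part of the package:

```python
import inspect
import logging

logger = logging.getLogger(__name__)


async def maybe_close(obj: object, label: str) -> bool:
    """Call obj.close(), awaiting it when close() is a coroutine function.

    Hypothetical helper sketching the pattern used in _cleanup_entities above;
    returns True when a close() call succeeded.
    """
    close = getattr(obj, "close", None)
    if not callable(close):
        return False
    try:
        if inspect.iscoroutinefunction(close):
            await close()
        else:
            close()
    except Exception as exc:  # mirror the diff: log and keep cleaning up
        logger.warning(f"Error closing {label}: {exc}")
        return False
    return True
```

Each of the three new blocks in `_cleanup_entities` is effectively this check inlined with its own counter (clients, credentials, MCP tools).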
@@ -215,7 +216,7 @@ class DevServer:
 
         @app.get("/v1/entities/{entity_id}/info", response_model=EntityInfo)
         async def get_entity_info(entity_id: str) -> EntityInfo:
-            """Get detailed information about a specific entity."""
+            """Get detailed information about a specific entity (triggers lazy loading)."""
             try:
                 executor = await self._ensure_executor()
                 entity_info = executor.get_entity_info(entity_id)
@@ -223,86 +224,96 @@ class DevServer:
                 if not entity_info:
                     raise HTTPException(status_code=404, detail=f"Entity {entity_id} not found")
 
+                # Trigger lazy loading if entity not yet loaded
+                # This will import the module and enrich metadata
+                entity_obj = await executor.entity_discovery.load_entity(entity_id)
+
+                # Get updated entity info (may have been enriched during load)
+                entity_info = executor.get_entity_info(entity_id) or entity_info
+
                 # For workflows, populate additional detailed information
-                if entity_info.type == "workflow":
-                                except Exception:
-                                    workflow_dump = raw_dump
-                                else:
-                                    workflow_dump = parsed_dump if isinstance(parsed_dump, dict) else raw_dump
-                            else:
+                if entity_info.type == "workflow" and entity_obj:
+                    # Entity object already loaded by load_entity() above
+                    # Get workflow structure
+                    workflow_dump = None
+                    if hasattr(entity_obj, "to_dict") and callable(getattr(entity_obj, "to_dict", None)):
+                        try:
+                            workflow_dump = entity_obj.to_dict()  # type: ignore[attr-defined]
+                        except Exception:
+                            workflow_dump = None
+                    elif hasattr(entity_obj, "to_json") and callable(getattr(entity_obj, "to_json", None)):
+                        try:
+                            raw_dump = entity_obj.to_json()  # type: ignore[attr-defined]
+                        except Exception:
+                            workflow_dump = None
+                        else:
+                            if isinstance(raw_dump, (bytes, bytearray)):
+                                try:
+                                    raw_dump = raw_dump.decode()
+                                except Exception:
+                                    raw_dump = raw_dump.decode(errors="replace")
+                            if isinstance(raw_dump, str):
+                                try:
+                                    parsed_dump = json.loads(raw_dump)
+                                except Exception:
                                     workflow_dump = raw_dump
+                                else:
+                                    workflow_dump = parsed_dump if isinstance(parsed_dump, dict) else raw_dump
+                            else:
+                                workflow_dump = raw_dump
+                    elif hasattr(entity_obj, "__dict__"):
+                        workflow_dump = {k: v for k, v in entity_obj.__dict__.items() if not k.startswith("_")}
 
+                    # Get input schema information
+                    input_schema = {}
+                    input_type_name = "Unknown"
+                    start_executor_id = ""
 
+                    try:
+                        from ._utils import (
+                            extract_executor_message_types,
+                            generate_input_schema,
+                            select_primary_input_type,
+                        )
 
+                        start_executor = entity_obj.get_start_executor()
+                    except Exception as e:
+                        logger.debug(f"Could not extract input info for workflow {entity_id}: {e}")
+                    else:
+                        if start_executor:
+                            start_executor_id = getattr(start_executor, "executor_id", "") or getattr(
+                                start_executor, "id", ""
+                            )
+
+                        message_types = extract_executor_message_types(start_executor)
+                        input_type = select_primary_input_type(message_types)
+
+                        if input_type:
+                            input_type_name = getattr(input_type, "__name__", str(input_type))
+
+                            # Generate schema using comprehensive schema generation
+                            input_schema = generate_input_schema(input_type)
+
+                    if not input_schema:
+                        input_schema = {"type": "string"}
+                        if input_type_name == "Unknown":
+                            input_type_name = "string"
+
+                    # Get executor list
+                    executor_list = []
+                    if hasattr(entity_obj, "executors") and entity_obj.executors:
+                        executor_list = [getattr(ex, "executor_id", str(ex)) for ex in entity_obj.executors]
+
+                    # Create copy of entity info and populate workflow-specific fields
+                    update_payload: dict[str, Any] = {
+                        "workflow_dump": workflow_dump,
+                        "input_schema": input_schema,
+                        "input_type_name": input_type_name,
+                        "start_executor_id": start_executor_id,
+                    }
+                    if executor_list:
+                        update_payload["executors"] = executor_list
+                    return entity_info.model_copy(update=update_payload)
 
                 # For non-workflow entities, return as-is
                 return entity_info
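With lazy loading in place, a GET to `/v1/entities/{entity_id}/info` now imports the entity module and, for workflows, returns the enriched fields populated above (`input_schema`, `input_type_name`, `start_executor_id`, `executors`, `workflow_dump`). A rough client-side sketch; the base URL and entity id are placeholders, not values defined by the package:

```python
# Sketch: inspect a workflow entity via the info endpoint.
import json
from urllib.request import urlopen

BASE = "http://localhost:8080"  # placeholder DevUI address
entity_id = "my_workflow"       # placeholder entity id

with urlopen(f"{BASE}/v1/entities/{entity_id}/info") as resp:
    info = json.load(resp)

# Workflow entities carry the fields populated by the handler above.
print(info.get("input_type_name"), info.get("start_executor_id"))
print(json.dumps(info.get("input_schema", {}), indent=2))
```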
@@ -313,70 +324,34 @@ class DevServer:
                 logger.error(f"Error getting entity info for {entity_id}: {e}")
                 raise HTTPException(status_code=500, detail=f"Failed to get entity info: {e!s}") from e
 
-        @app.post("/v1/entities/
-            try:
-                url = request.get("url")
-                metadata = request.get("metadata", {})
-
-                if not url:
-                    raise HTTPException(status_code=400, detail="URL is required")
-
-                logger.info(f"Attempting to add entity from URL: {url}")
-                executor = await self._ensure_executor()
-                entity_info, error_msg = await executor.entity_discovery.fetch_remote_entity(url, metadata)
-
-                if not entity_info:
-                    # Sanitize error message - only return safe, user-friendly errors
-                    logger.error(f"Failed to fetch or validate entity from {url}: {error_msg}")
-                    safe_error = error_msg if error_msg else "Failed to fetch or validate entity"
-                    raise HTTPException(status_code=400, detail=safe_error)
-
-                logger.info(f"Successfully added entity: {entity_info.id}")
-                return {"success": True, "entity": entity_info.model_dump()}
+        @app.post("/v1/entities/{entity_id}/reload")
+        async def reload_entity(entity_id: str) -> dict[str, Any]:
+            """Hot reload entity (clears cache, will reimport on next access).
 
-                logger.error(f"Error adding entity: {e}", exc_info=True)
-                # Don't expose internal error details to client
-                raise HTTPException(
-                    status_code=500, detail="An unexpected error occurred while adding the entity"
-                ) from e
-
-        @app.delete("/v1/entities/{entity_id}")
-        async def remove_entity(entity_id: str) -> dict[str, Any]:
-            """Remove entity by ID."""
+            This enables hot reload during development - edit entity code, call this endpoint,
+            and the next execution will use the updated code without server restart.
+            """
             try:
                 executor = await self._ensure_executor()
 
-                        client = entity_obj.chat_client
-                        if hasattr(client, "close") and callable(client.close):
-                            if inspect.iscoroutinefunction(client.close):
-                                await client.close()
-                            else:
-                                client.close()
-                            logger.info(f"Closed client for entity: {entity_id}")
-                except Exception as e:
-                    logger.warning(f"Error closing entity {entity_id} during removal: {e}")
+                # Check if entity exists
+                entity_info = executor.get_entity_info(entity_id)
+                if not entity_info:
+                    raise HTTPException(status_code=404, detail=f"Entity {entity_id} not found")
 
+                # Invalidate cache
+                executor.entity_discovery.invalidate_entity(entity_id)
 
+                return {
+                    "success": True,
+                    "message": f"Entity '{entity_id}' cache cleared. Will reload on next access.",
+                }
 
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error reloading entity {entity_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to reload entity: {e!s}") from e
 
         @app.post("/v1/responses")
         async def create_response(request: AgentFrameworkRequest, raw_request: Request) -> Any:
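Per the new docstring, the reload route supports a tight edit-and-rerun loop: edit the entity's source, POST to the endpoint, and the next execution reimports the module. A hedged sketch of that call; the base URL and entity id are placeholders:

```python
# Sketch: clear an entity's cache so the next request reimports its module.
import json
from urllib.request import Request, urlopen

BASE = "http://localhost:8080"  # placeholder DevUI address
entity_id = "my_agent"          # placeholder entity id

req = Request(f"{BASE}/v1/entities/{entity_id}/reload", method="POST")
with urlopen(req) as resp:
    # Expected shape per the handler above:
    # {"success": true, "message": "Entity '...' cache cleared. ..."}
    print(json.load(resp))
```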
@@ -421,112 +396,161 @@ class DevServer:
                 error = OpenAIError.create(f"Execution failed: {e!s}")
                 return JSONResponse(status_code=500, content=error.to_dict())
 
+        # ========================================
+        # OpenAI Conversations API (Standard)
+        # ========================================
+
+        @app.post("/v1/conversations")
+        async def create_conversation(request_data: dict[str, Any]) -> dict[str, Any]:
+            """Create a new conversation - OpenAI standard."""
             try:
+                metadata = request_data.get("metadata")
+                executor = await self._ensure_executor()
+                conversation = executor.conversation_store.create_conversation(metadata=metadata)
+                return conversation.model_dump()
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error creating conversation: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to create conversation: {e!s}") from e
 
+        @app.get("/v1/conversations")
+        async def list_conversations(agent_id: str | None = None) -> dict[str, Any]:
+            """List conversations, optionally filtered by agent_id."""
+            try:
                 executor = await self._ensure_executor()
+
+                if agent_id:
+                    # Filter by agent_id metadata
+                    conversations = executor.conversation_store.list_conversations_by_metadata({"agent_id": agent_id})
+                else:
+                    # Return all conversations (for InMemoryStore, list all)
+                    # Note: This assumes list_conversations_by_metadata({}) returns all
+                    conversations = executor.conversation_store.list_conversations_by_metadata({})
 
                 return {
-                    "metadata": {"agent_id": agent_id},
+                    "object": "list",
+                    "data": [conv.model_dump() for conv in conversations],
+                    "has_more": False,
                 }
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error listing conversations: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to list conversations: {e!s}") from e
 
-        @app.get("/v1/
+        @app.get("/v1/conversations/{conversation_id}")
+        async def retrieve_conversation(conversation_id: str) -> dict[str, Any]:
+            """Get conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                return {"object": "list", "data": threads}
+                conversation = executor.conversation_store.get_conversation(conversation_id)
+                if not conversation:
+                    raise HTTPException(status_code=404, detail="Conversation not found")
+                return conversation.model_dump()
+            except HTTPException:
+                raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error getting conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to get conversation: {e!s}") from e
 
-        @app.
+        @app.post("/v1/conversations/{conversation_id}")
+        async def update_conversation(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]:
+            """Update conversation metadata - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                # Get the agent that owns this thread
-                agent_id = executor.get_agent_for_thread(thread_id)
-
-                return {"id": thread_id, "object": "thread", "agent_id": agent_id}
+                metadata = request_data.get("metadata", {})
+                conversation = executor.conversation_store.update_conversation(conversation_id, metadata=metadata)
+                return conversation.model_dump()
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error updating conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to update conversation: {e!s}") from e
 
-        @app.delete("/v1/
-            """Delete
+        @app.delete("/v1/conversations/{conversation_id}")
+        async def delete_conversation(conversation_id: str) -> dict[str, Any]:
+            """Delete conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                return {"id": thread_id, "object": "thread.deleted", "deleted": True}
+                result = executor.conversation_store.delete_conversation(conversation_id)
+                return result.model_dump()
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error deleting
-                raise HTTPException(status_code=500, detail=f"Failed to delete
+                logger.error(f"Error deleting conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to delete conversation: {e!s}") from e
 
-        @app.
+        @app.post("/v1/conversations/{conversation_id}/items")
+        async def create_conversation_items(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]:
+            """Add items to conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
+                items = request_data.get("items", [])
+                conv_items = await executor.conversation_store.add_items(conversation_id, items=items)
+                return {"object": "list", "data": [item.model_dump() for item in conv_items]}
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error adding items to conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to add items: {e!s}") from e
+
+        @app.get("/v1/conversations/{conversation_id}/items")
+        async def list_conversation_items(
+            conversation_id: str, limit: int = 100, after: str | None = None, order: str = "asc"
+        ) -> dict[str, Any]:
+            """List conversation items - OpenAI standard."""
+            try:
+                executor = await self._ensure_executor()
+                items, has_more = await executor.conversation_store.list_items(
+                    conversation_id, limit=limit, after=after, order=order
+                )
+                return {
+                    "object": "list",
+                    "data": [item.model_dump() for item in items],
+                    "has_more": has_more,
+                }
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error listing items for conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to list items: {e!s}") from e
 
+        @app.get("/v1/conversations/{conversation_id}/items/{item_id}")
+        async def retrieve_conversation_item(conversation_id: str, item_id: str) -> dict[str, Any]:
+            """Get specific conversation item - OpenAI standard."""
+            try:
+                executor = await self._ensure_executor()
+                item = executor.conversation_store.get_item(conversation_id, item_id)
+                if not item:
+                    raise HTTPException(status_code=404, detail="Item not found")
+                return item.model_dump()
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error getting
-                raise HTTPException(status_code=500, detail=f"Failed to get
+                logger.error(f"Error getting item {item_id} from conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to get item: {e!s}") from e
 
     async def _stream_execution(
         self, executor: AgentFrameworkExecutor, request: AgentFrameworkRequest
     ) -> AsyncGenerator[str, None]:
         """Stream execution directly through executor."""
         try:
+            # Collect events for final response.completed event
+            events = []
+
+            # Stream all events
             async for event in executor.execute_streaming(request):
+                events.append(event)
+
                 # IMPORTANT: Check model_dump_json FIRST because to_json() can have newlines (pretty-printing)
                 # which breaks SSE format. model_dump_json() returns single-line JSON.
                 if hasattr(event, "model_dump_json"):
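Taken together, the new routes form the standard OpenAI Conversations surface: create, retrieve, update, and delete a conversation, plus item management. A sketch of a create → add items → list round trip; the base URL is a placeholder, and the item payload shape is assumed to follow the OpenAI conversation-item format rather than taken from this package:

```python
# Sketch: exercise the new Conversations routes against a running DevUI server.
import json
from urllib.request import Request, urlopen

BASE = "http://localhost:8080"  # placeholder DevUI address


def post(path: str, payload: dict) -> dict:
    """POST a JSON body and return the decoded JSON response."""
    req = Request(
        f"{BASE}{path}",
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    with urlopen(req) as resp:
        return json.load(resp)


# Create a conversation tagged with an agent id, add one item, then list items.
conv = post("/v1/conversations", {"metadata": {"agent_id": "my_agent"}})
post(
    f"/v1/conversations/{conv['id']}/items",
    {"items": [{"type": "message", "role": "user", "content": "Hello"}]},
)

with urlopen(f"{BASE}/v1/conversations/{conv['id']}/items?limit=10&order=asc") as resp:
    print(json.load(resp)["data"])
```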
@@ -544,6 +568,17 @@ class DevServer:
                 payload = json.dumps(str(event))
                 yield f"data: {payload}\n\n"
 
+            # Aggregate to final response and emit response.completed event (OpenAI standard)
+            from .models import ResponseCompletedEvent
+
+            final_response = await executor.message_mapper.aggregate_to_response(events, request)
+            completed_event = ResponseCompletedEvent(
+                type="response.completed",
+                response=final_response,
+                sequence_number=len(events),
+            )
+            yield f"data: {completed_event.model_dump_json()}\n\n"
+
             # Send final done event
             yield "data: [DONE]\n\n"
 
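With the aggregation step above, a streaming client now sees the incremental events, then a single `response.completed` event carrying the aggregated response, and finally the `[DONE]` terminator. A rough consumer sketch; the base URL and request body fields are placeholders, not values defined by the package:

```python
# Sketch: read the SSE stream from /v1/responses and stop on [DONE],
# printing the new response.completed event when it arrives.
import json
from urllib.request import Request, urlopen

BASE = "http://localhost:8080"  # placeholder DevUI address
body = {"model": "my_agent", "input": "Hello", "stream": True}  # placeholder fields

req = Request(
    f"{BASE}/v1/responses",
    data=json.dumps(body).encode(),
    headers={"Content-Type": "application/json"},
    method="POST",
)

with urlopen(req) as resp:
    for raw in resp:  # HTTPResponse is iterable line by line
        line = raw.decode().strip()
        if not line.startswith("data: "):
            continue
        data = line[len("data: "):]
        if data == "[DONE]":
            break
        event = json.loads(data)
        if event.get("type") == "response.completed":
            print("final response:", event.get("response", {}).get("id"))
```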