agent-framework-devui 1.0.0b251007-py3-none-any.whl → 1.0.0b251016-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, exactly as they appear in their respective public registries, and is provided for informational purposes only.
Note: this release of agent-framework-devui has been flagged as potentially problematic.
- agent_framework_devui/_conversations.py +473 -0
- agent_framework_devui/_discovery.py +295 -325
- agent_framework_devui/_executor.py +99 -241
- agent_framework_devui/_mapper.py +281 -78
- agent_framework_devui/_server.py +232 -239
- agent_framework_devui/_utils.py +127 -0
- agent_framework_devui/models/__init__.py +15 -10
- agent_framework_devui/models/_discovery_models.py +1 -2
- agent_framework_devui/models/_openai_custom.py +45 -90
- agent_framework_devui/ui/assets/index-CE4pGoXh.css +1 -0
- agent_framework_devui/ui/assets/index-DmL7WSFa.js +577 -0
- agent_framework_devui/ui/index.html +2 -2
- agent_framework_devui-1.0.0b251016.dist-info/METADATA +286 -0
- agent_framework_devui-1.0.0b251016.dist-info/RECORD +23 -0
- agent_framework_devui/ui/assets/index-D0SfShuZ.js +0 -445
- agent_framework_devui/ui/assets/index-WsCIE0bH.css +0 -1
- agent_framework_devui-1.0.0b251007.dist-info/METADATA +0 -172
- agent_framework_devui-1.0.0b251007.dist-info/RECORD +0 -22
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/WHEEL +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/entry_points.txt +0 -0
- {agent_framework_devui-1.0.0b251007.dist-info → agent_framework_devui-1.0.0b251016.dist-info}/licenses/LICENSE +0 -0
agent_framework_devui/_server.py
CHANGED
@@ -7,7 +7,7 @@ import json
 import logging
 from collections.abc import AsyncGenerator
 from contextlib import asynccontextmanager
-from typing import Any
+from typing import Any
 
 from fastapi import FastAPI, HTTPException, Request
 from fastapi.middleware.cors import CORSMiddleware

@@ -23,47 +23,6 @@ from .models._discovery_models import DiscoveryResponse, EntityInfo
 logger = logging.getLogger(__name__)
 
 
-def _extract_executor_message_types(executor: Any) -> list[Any]:
-    """Return declared input types for the given executor."""
-    message_types: list[Any] = []
-
-    try:
-        input_types = getattr(executor, "input_types", None)
-    except Exception as exc:  # pragma: no cover - defensive logging path
-        logger.debug(f"Failed to access executor input_types: {exc}")
-    else:
-        if input_types:
-            message_types = list(input_types)
-
-    if not message_types and hasattr(executor, "_handlers"):
-        try:
-            handlers = executor._handlers
-            if isinstance(handlers, dict):
-                message_types = list(handlers.keys())
-        except Exception as exc:  # pragma: no cover - defensive logging path
-            logger.debug(f"Failed to read executor handlers: {exc}")
-
-    return message_types
-
-
-def _select_primary_input_type(message_types: list[Any]) -> Any | None:
-    """Choose the most user-friendly input type for rendering workflow inputs."""
-    if not message_types:
-        return None
-
-    preferred = (str, dict)
-
-    for candidate in preferred:
-        for message_type in message_types:
-            if message_type is candidate:
-                return candidate
-            origin = get_origin(message_type)
-            if origin is candidate:
-                return candidate
-
-    return message_types[0]
-
-
 class DevServer:
     """Development Server - OpenAI compatible API server for debugging agents."""
 

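The two module-level helpers removed here do not disappear: the new agent_framework_devui/_utils.py (+127 lines in the file list) provides extract_executor_message_types and select_primary_input_type, which this file imports further down. A minimal sketch of the selection behavior, assuming the relocated helpers keep the bodies shown above (the import path is inferred from the `from ._utils import ...` added later in this diff):

    # Assumed: behavior of the relocated helper matches the removed body above.
    from agent_framework_devui._utils import select_primary_input_type

    # Plain `str`/`dict` win over richer message types; `str` is checked before `dict`.
    assert select_primary_input_type([dict, str]) is str
    # With no preferred type present, the first declared type is returned.
    assert select_primary_input_type([int, float]) is int
    # An empty declaration yields None.
    assert select_primary_input_type([]) is None
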
@@ -215,7 +174,7 @@ class DevServer:
 
         @app.get("/v1/entities/{entity_id}/info", response_model=EntityInfo)
         async def get_entity_info(entity_id: str) -> EntityInfo:
-            """Get detailed information about a specific entity."""
+            """Get detailed information about a specific entity (triggers lazy loading)."""
             try:
                 executor = await self._ensure_executor()
                 entity_info = executor.get_entity_info(entity_id)

@@ -223,86 +182,96 @@ class DevServer:
                 if not entity_info:
                     raise HTTPException(status_code=404, detail=f"Entity {entity_id} not found")
 
+                # Trigger lazy loading if entity not yet loaded
+                # This will import the module and enrich metadata
+                entity_obj = await executor.entity_discovery.load_entity(entity_id)
+
+                # Get updated entity info (may have been enriched during load)
+                entity_info = executor.get_entity_info(entity_id) or entity_info
+
                 # For workflows, populate additional detailed information
-                if entity_info.type == "workflow":
-                    ... (23 removed lines elided in the rendered diff)
-                        except Exception:
-                            workflow_dump = raw_dump
-                        else:
-                            workflow_dump = parsed_dump if isinstance(parsed_dump, dict) else raw_dump
-                    else:
+                if entity_info.type == "workflow" and entity_obj:
+                    # Entity object already loaded by load_entity() above
+                    # Get workflow structure
+                    workflow_dump = None
+                    if hasattr(entity_obj, "to_dict") and callable(getattr(entity_obj, "to_dict", None)):
+                        try:
+                            workflow_dump = entity_obj.to_dict()  # type: ignore[attr-defined]
+                        except Exception:
+                            workflow_dump = None
+                    elif hasattr(entity_obj, "to_json") and callable(getattr(entity_obj, "to_json", None)):
+                        try:
+                            raw_dump = entity_obj.to_json()  # type: ignore[attr-defined]
+                        except Exception:
+                            workflow_dump = None
+                        else:
+                            if isinstance(raw_dump, (bytes, bytearray)):
+                                try:
+                                    raw_dump = raw_dump.decode()
+                                except Exception:
+                                    raw_dump = raw_dump.decode(errors="replace")
+                            if isinstance(raw_dump, str):
+                                try:
+                                    parsed_dump = json.loads(raw_dump)
+                                except Exception:
                                     workflow_dump = raw_dump
-
-
+                                else:
+                                    workflow_dump = parsed_dump if isinstance(parsed_dump, dict) else raw_dump
+                            else:
+                                workflow_dump = raw_dump
+                    elif hasattr(entity_obj, "__dict__"):
+                        workflow_dump = {k: v for k, v in entity_obj.__dict__.items() if not k.startswith("_")}
 
-                    ... (4 removed lines elided in the rendered diff)
+                    # Get input schema information
+                    input_schema = {}
+                    input_type_name = "Unknown"
+                    start_executor_id = ""
 
-
-
+                    try:
+                        from ._utils import (
+                            extract_executor_message_types,
+                            generate_input_schema,
+                            select_primary_input_type,
+                        )
 
-                    ... (38 removed lines elided in the rendered diff)
+                        start_executor = entity_obj.get_start_executor()
+                    except Exception as e:
+                        logger.debug(f"Could not extract input info for workflow {entity_id}: {e}")
+                    else:
+                        if start_executor:
+                            start_executor_id = getattr(start_executor, "executor_id", "") or getattr(
+                                start_executor, "id", ""
+                            )
+
+                            message_types = extract_executor_message_types(start_executor)
+                            input_type = select_primary_input_type(message_types)
+
+                            if input_type:
+                                input_type_name = getattr(input_type, "__name__", str(input_type))
+
+                                # Generate schema using comprehensive schema generation
+                                input_schema = generate_input_schema(input_type)
+
+                    if not input_schema:
+                        input_schema = {"type": "string"}
+                        if input_type_name == "Unknown":
+                            input_type_name = "string"
+
+                    # Get executor list
+                    executor_list = []
+                    if hasattr(entity_obj, "executors") and entity_obj.executors:
+                        executor_list = [getattr(ex, "executor_id", str(ex)) for ex in entity_obj.executors]
+
+                    # Create copy of entity info and populate workflow-specific fields
+                    update_payload: dict[str, Any] = {
+                        "workflow_dump": workflow_dump,
+                        "input_schema": input_schema,
+                        "input_type_name": input_type_name,
+                        "start_executor_id": start_executor_id,
+                    }
+                    if executor_list:
+                        update_payload["executors"] = executor_list
+                    return entity_info.model_copy(update=update_payload)
 
                 # For non-workflow entities, return as-is
                 return entity_info

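With lazy loading in place, GET /v1/entities/{entity_id}/info now imports the entity module on first access and, for workflows, returns the enriched fields populated above (workflow_dump, input_schema, input_type_name, start_executor_id, executors). An illustrative request against a locally running DevUI server; host, port, and the entity id are placeholders, only the endpoint path comes from the hunk above:

    import json
    import urllib.request

    # Placeholders: adjust host/port and use a real entity id from GET /v1/entities.
    with urllib.request.urlopen("http://127.0.0.1:8080/v1/entities/my_workflow/info") as resp:
        info = json.load(resp)

    # Workflow entities carry the newly populated fields.
    print(info.get("input_type_name"), info.get("start_executor_id"), info.get("executors"))
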
@@ -313,70 +282,34 @@ class DevServer:
                 logger.error(f"Error getting entity info for {entity_id}: {e}")
                 raise HTTPException(status_code=500, detail=f"Failed to get entity info: {e!s}") from e
 
-        @app.post("/v1/entities/
-        async def
-            """
-            try:
-                url = request.get("url")
-                metadata = request.get("metadata", {})
-
-                if not url:
-                    raise HTTPException(status_code=400, detail="URL is required")
+        @app.post("/v1/entities/{entity_id}/reload")
+        async def reload_entity(entity_id: str) -> dict[str, Any]:
+            """Hot reload entity (clears cache, will reimport on next access).
 
-                ... (4 removed lines elided in the rendered diff)
-                if not entity_info:
-                    # Sanitize error message - only return safe, user-friendly errors
-                    logger.error(f"Failed to fetch or validate entity from {url}: {error_msg}")
-                    safe_error = error_msg if error_msg else "Failed to fetch or validate entity"
-                    raise HTTPException(status_code=400, detail=safe_error)
-
-                logger.info(f"Successfully added entity: {entity_info.id}")
-                return {"success": True, "entity": entity_info.model_dump()}
-
-            except HTTPException:
-                raise
-            except Exception as e:
-                logger.error(f"Error adding entity: {e}", exc_info=True)
-                # Don't expose internal error details to client
-                raise HTTPException(
-                    status_code=500, detail="An unexpected error occurred while adding the entity"
-                ) from e
-
-        @app.delete("/v1/entities/{entity_id}")
-        async def remove_entity(entity_id: str) -> dict[str, Any]:
-            """Remove entity by ID."""
+            This enables hot reload during development - edit entity code, call this endpoint,
+            and the next execution will use the updated code without server restart.
+            """
             try:
                 executor = await self._ensure_executor()
 
-                #
-                ... (3 removed lines elided in the rendered diff)
-                        client = entity_obj.chat_client
-                        if hasattr(client, "close") and callable(client.close):
-                            if inspect.iscoroutinefunction(client.close):
-                                await client.close()
-                            else:
-                                client.close()
-                            logger.info(f"Closed client for entity: {entity_id}")
-                except Exception as e:
-                    logger.warning(f"Error closing entity {entity_id} during removal: {e}")
+                # Check if entity exists
+                entity_info = executor.get_entity_info(entity_id)
+                if not entity_info:
+                    raise HTTPException(status_code=404, detail=f"Entity {entity_id} not found")
 
-                #
-
+                # Invalidate cache
+                executor.entity_discovery.invalidate_entity(entity_id)
 
-                ... (3 removed lines elided in the rendered diff)
+                return {
+                    "success": True,
+                    "message": f"Entity '{entity_id}' cache cleared. Will reload on next access.",
+                }
 
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error reloading entity {entity_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to reload entity: {e!s}") from e
 
         @app.post("/v1/responses")
         async def create_response(request: AgentFrameworkRequest, raw_request: Request) -> Any:

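The URL-based add/remove endpoints are replaced by a cache-invalidating reload endpoint, so the dev loop becomes: edit the entity code, call the endpoint, run again. A hypothetical round trip (entity id and port are placeholders):

    import json
    import urllib.request

    # POST with an empty body; the handler only needs the entity id from the path.
    req = urllib.request.Request("http://127.0.0.1:8080/v1/entities/my_agent/reload", method="POST")
    with urllib.request.urlopen(req) as resp:
        print(json.load(resp)["message"])  # "... cache cleared. Will reload on next access."
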
@@ -421,112 +354,161 @@ class DevServer:
                 error = OpenAIError.create(f"Execution failed: {e!s}")
                 return JSONResponse(status_code=500, content=error.to_dict())
 
-        ... (3 removed lines elided in the rendered diff)
+        # ========================================
+        # OpenAI Conversations API (Standard)
+        # ========================================
+
+        @app.post("/v1/conversations")
+        async def create_conversation(request_data: dict[str, Any]) -> dict[str, Any]:
+            """Create a new conversation - OpenAI standard."""
             try:
-            ... (3 removed lines elided in the rendered diff)
+                metadata = request_data.get("metadata")
+                executor = await self._ensure_executor()
+                conversation = executor.conversation_store.create_conversation(metadata=metadata)
+                return conversation.model_dump()
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error creating conversation: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to create conversation: {e!s}") from e
 
+        @app.get("/v1/conversations")
+        async def list_conversations(agent_id: str | None = None) -> dict[str, Any]:
+            """List conversations, optionally filtered by agent_id."""
+            try:
                 executor = await self._ensure_executor()
-
+
+                if agent_id:
+                    # Filter by agent_id metadata
+                    conversations = executor.conversation_store.list_conversations_by_metadata({"agent_id": agent_id})
+                else:
+                    # Return all conversations (for InMemoryStore, list all)
+                    # Note: This assumes list_conversations_by_metadata({}) returns all
+                    conversations = executor.conversation_store.list_conversations_by_metadata({})
 
                 return {
-                    "
-                    "
-                    "
-                    "metadata": {"agent_id": agent_id},
+                    "object": "list",
+                    "data": [conv.model_dump() for conv in conversations],
+                    "has_more": False,
                 }
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error listing conversations: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to list conversations: {e!s}") from e
 
-        @app.get("/v1/
-        async def
-            """
+        @app.get("/v1/conversations/{conversation_id}")
+        async def retrieve_conversation(conversation_id: str) -> dict[str, Any]:
+            """Get conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                ... (7 removed lines elided in the rendered diff)
-                return {"object": "list", "data": threads}
+                conversation = executor.conversation_store.get_conversation(conversation_id)
+                if not conversation:
+                    raise HTTPException(status_code=404, detail="Conversation not found")
+                return conversation.model_dump()
+            except HTTPException:
+                raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error getting conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to get conversation: {e!s}") from e
 
-        @app.
-        async def
-            """
+        @app.post("/v1/conversations/{conversation_id}")
+        async def update_conversation(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]:
+            """Update conversation metadata - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                ... (6 removed lines elided in the rendered diff)
-                # Get the agent that owns this thread
-                agent_id = executor.get_agent_for_thread(thread_id)
-
-                return {"id": thread_id, "object": "thread", "agent_id": agent_id}
+                metadata = request_data.get("metadata", {})
+                conversation = executor.conversation_store.update_conversation(conversation_id, metadata=metadata)
+                return conversation.model_dump()
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error
-                raise HTTPException(status_code=500, detail=f"Failed to
+                logger.error(f"Error updating conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to update conversation: {e!s}") from e
 
-        @app.delete("/v1/
-        async def
-            """Delete
+        @app.delete("/v1/conversations/{conversation_id}")
+        async def delete_conversation(conversation_id: str) -> dict[str, Any]:
+            """Delete conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
-                ... (5 removed lines elided in the rendered diff)
-                return {"id": thread_id, "object": "thread.deleted", "deleted": True}
+                result = executor.conversation_store.delete_conversation(conversation_id)
+                return result.model_dump()
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error deleting
-                raise HTTPException(status_code=500, detail=f"Failed to delete
+                logger.error(f"Error deleting conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to delete conversation: {e!s}") from e
 
-        @app.
-        async def
-            """
+        @app.post("/v1/conversations/{conversation_id}/items")
+        async def create_conversation_items(conversation_id: str, request_data: dict[str, Any]) -> dict[str, Any]:
+            """Add items to conversation - OpenAI standard."""
             try:
                 executor = await self._ensure_executor()
+                items = request_data.get("items", [])
+                conv_items = await executor.conversation_store.add_items(conversation_id, items=items)
+                return {"object": "list", "data": [item.model_dump() for item in conv_items]}
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error adding items to conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to add items: {e!s}") from e
+
+        @app.get("/v1/conversations/{conversation_id}/items")
+        async def list_conversation_items(
+            conversation_id: str, limit: int = 100, after: str | None = None, order: str = "asc"
+        ) -> dict[str, Any]:
+            """List conversation items - OpenAI standard."""
+            try:
+                executor = await self._ensure_executor()
+                items, has_more = await executor.conversation_store.list_items(
+                    conversation_id, limit=limit, after=after, order=order
+                )
+                return {
+                    "object": "list",
+                    "data": [item.model_dump() for item in items],
+                    "has_more": has_more,
+                }
+            except ValueError as e:
+                raise HTTPException(status_code=404, detail=str(e)) from e
+            except HTTPException:
+                raise
+            except Exception as e:
+                logger.error(f"Error listing items for conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to list items: {e!s}") from e
 
-        ... (8 removed lines elided in the rendered diff)
-                return
+        @app.get("/v1/conversations/{conversation_id}/items/{item_id}")
+        async def retrieve_conversation_item(conversation_id: str, item_id: str) -> dict[str, Any]:
+            """Get specific conversation item - OpenAI standard."""
+            try:
+                executor = await self._ensure_executor()
+                item = executor.conversation_store.get_item(conversation_id, item_id)
+                if not item:
+                    raise HTTPException(status_code=404, detail="Item not found")
+                return item.model_dump()
             except HTTPException:
                 raise
             except Exception as e:
-                logger.error(f"Error getting
-                raise HTTPException(status_code=500, detail=f"Failed to get
+                logger.error(f"Error getting item {item_id} from conversation {conversation_id}: {e}")
+                raise HTTPException(status_code=500, detail=f"Failed to get item: {e!s}") from e
 
     async def _stream_execution(
         self, executor: AgentFrameworkExecutor, request: AgentFrameworkRequest
     ) -> AsyncGenerator[str, None]:
         """Stream execution directly through executor."""
         try:
-            #
+            # Collect events for final response.completed event
+            events = []
+
+            # Stream all events
             async for event in executor.execute_streaming(request):
+                events.append(event)
+
                 # IMPORTANT: Check model_dump_json FIRST because to_json() can have newlines (pretty-printing)
                 # which breaks SSE format. model_dump_json() returns single-line JSON.
                 if hasattr(event, "model_dump_json"):

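The thread endpoints give way to an OpenAI-style Conversations API (create, list, retrieve, update, delete, plus item management). A sketch of a client round trip; the base URL is a placeholder and the item payload shape is an assumption, since this diff does not show the item schema:

    import json
    import urllib.request

    def _post(url: str, body: dict) -> dict:
        req = urllib.request.Request(
            url,
            data=json.dumps(body).encode(),
            headers={"Content-Type": "application/json"},
        )
        with urllib.request.urlopen(req) as resp:
            return json.load(resp)

    base = "http://127.0.0.1:8080/v1/conversations"
    conv = _post(base, {"metadata": {"agent_id": "my_agent"}})
    # Item shape below is illustrative only; the endpoints and query params come from the hunk above.
    _post(f"{base}/{conv['id']}/items", {"items": [{"type": "message", "role": "user", "content": "hi"}]})
    with urllib.request.urlopen(f"{base}/{conv['id']}/items?limit=10&order=asc") as resp:
        print(json.load(resp)["data"])
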
@@ -544,6 +526,17 @@ class DevServer:
                     payload = json.dumps(str(event))
                     yield f"data: {payload}\n\n"
 
+            # Aggregate to final response and emit response.completed event (OpenAI standard)
+            from .models import ResponseCompletedEvent
+
+            final_response = await executor.message_mapper.aggregate_to_response(events, request)
+            completed_event = ResponseCompletedEvent(
+                type="response.completed",
+                response=final_response,
+                sequence_number=len(events),
+            )
+            yield f"data: {completed_event.model_dump_json()}\n\n"
+
             # Send final done event
             yield "data: [DONE]\n\n"
 
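Streaming now buffers every emitted event, aggregates them into a final response, and emits a response.completed event before the terminating [DONE] marker. A minimal SSE reader sketch for /v1/responses; the request body fields follow OpenAI Responses conventions and are assumptions rather than taken from this diff:

    import json
    import urllib.request

    req = urllib.request.Request(
        "http://127.0.0.1:8080/v1/responses",
        # Assumed request fields; only the endpoint path and SSE framing come from the diff.
        data=json.dumps({"model": "my_agent", "input": "hello", "stream": True}).encode(),
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(req) as resp:
        for raw in resp:
            line = raw.decode("utf-8").strip()
            if not line.startswith("data: "):
                continue
            data = line[len("data: "):]
            if data == "[DONE]":
                break  # final done event
            event = json.loads(data)
            if isinstance(event, dict) and event.get("type") == "response.completed":
                print("final response:", event.get("response"))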