tracebrain 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tracebrain/__init__.py +63 -0
- tracebrain/api/v1/__init__.py +9 -0
- tracebrain/api/v1/ai_features.py +168 -0
- tracebrain/api/v1/api_router.py +19 -0
- tracebrain/api/v1/common.py +53 -0
- tracebrain/api/v1/curriculum.py +138 -0
- tracebrain/api/v1/episodes.py +154 -0
- tracebrain/api/v1/operations.py +115 -0
- tracebrain/api/v1/schemas/__init__.py +77 -0
- tracebrain/api/v1/schemas/api_models.py +414 -0
- tracebrain/api/v1/system.py +201 -0
- tracebrain/api/v1/traces.py +387 -0
- tracebrain/cli.py +655 -0
- tracebrain/config.py +224 -0
- tracebrain/core/__init__.py +0 -0
- tracebrain/core/curator.py +301 -0
- tracebrain/core/librarian.py +704 -0
- tracebrain/core/llm_providers.py +1147 -0
- tracebrain/core/schema.py +121 -0
- tracebrain/core/seeder.py +68 -0
- tracebrain/core/services/__init__.py +1 -0
- tracebrain/core/services/embedding.py +129 -0
- tracebrain/core/store.py +1773 -0
- tracebrain/db/__init__.py +0 -0
- tracebrain/db/base.py +400 -0
- tracebrain/db/session.py +132 -0
- tracebrain/evaluators/__init__.py +1 -0
- tracebrain/evaluators/judge_agent.py +270 -0
- tracebrain/main.py +268 -0
- tracebrain/resources/docker/Dockerfile +54 -0
- tracebrain/resources/docker/README.md +132 -0
- tracebrain/resources/docker/docker-compose.yml +93 -0
- tracebrain/resources/samples/sample_10_partial_failure.json +118 -0
- tracebrain/resources/samples/sample_11_episode_group_attempt_1.json +71 -0
- tracebrain/resources/samples/sample_12_episode_group_attempt_2.json +69 -0
- tracebrain/resources/samples/sample_13_governance_status.json +53 -0
- tracebrain/resources/samples/sample_14_failed_status.json +55 -0
- tracebrain/resources/samples/sample_15_hallucination.json +64 -0
- tracebrain/resources/samples/sample_16_format_error.json +35 -0
- tracebrain/resources/samples/sample_17_context_overflow.json +35 -0
- tracebrain/resources/samples/sample_18_invalid_arguments.json +66 -0
- tracebrain/resources/samples/sample_19_multi_agent_interaction.json +52 -0
- tracebrain/resources/samples/sample_1_simple_success.json +72 -0
- tracebrain/resources/samples/sample_20_experience_retrieval.json +65 -0
- tracebrain/resources/samples/sample_2_complex_multistep.json +102 -0
- tracebrain/resources/samples/sample_3_tool_error.json +72 -0
- tracebrain/resources/samples/sample_4_self_correction.json +102 -0
- tracebrain/resources/samples/sample_5_multi_tool_orchestration.json +102 -0
- tracebrain/resources/samples/sample_6_no_tool_call.json +38 -0
- tracebrain/resources/samples/sample_7_parallel_calls.json +82 -0
- tracebrain/resources/samples/sample_8_clarifying_question.json +38 -0
- tracebrain/resources/samples/sample_9_looping_behavior.json +135 -0
- tracebrain/sdk/__init__.py +19 -0
- tracebrain/sdk/agent_tools.py +111 -0
- tracebrain/sdk/client.py +785 -0
- tracebrain/sdk/trace_context.py +20 -0
- tracebrain/static/assets/chat-dark-bg-BmOTGz3x.png +0 -0
- tracebrain/static/assets/chat-light-bg-DwNPDG7g.png +0 -0
- tracebrain/static/assets/dark-owl-CATNyvf8.png +0 -0
- tracebrain/static/assets/index-B6hMk-_K.js +286 -0
- tracebrain/static/assets/index-CXBZvQ1E.css +1 -0
- tracebrain/static/assets/light-owl-CAs_QdDB.png +0 -0
- tracebrain/static/chat-dark-bg.png +0 -0
- tracebrain/static/chat-light-bg.png +0 -0
- tracebrain/static/favicon-dark.png +0 -0
- tracebrain/static/favicon-light.png +0 -0
- tracebrain/static/index.html +16 -0
- tracebrain-1.0.0.dist-info/METADATA +793 -0
- tracebrain-1.0.0.dist-info/RECORD +72 -0
- tracebrain-1.0.0.dist-info/WHEEL +5 -0
- tracebrain-1.0.0.dist-info/entry_points.txt +2 -0
- tracebrain-1.0.0.dist-info/top_level.txt +1 -0
tracebrain/__init__.py
ADDED
|
@@ -0,0 +1,63 @@
|
|
|
1
|
+
"""
|
|
2
|
+
TraceBrain - Observability Platform for Agentic AI
|
|
3
|
+
|
|
4
|
+
This package provides a complete observability solution for AI agents,
|
|
5
|
+
allowing users to collect, store, and visualize execution traces.
|
|
6
|
+
|
|
7
|
+
Philosophy: "Pip install and run"
|
|
8
|
+
- Single package containing both backend (FastAPI) and frontend (React)
|
|
9
|
+
- Support for SQLite (development) and PostgreSQL (production)
|
|
10
|
+
- Custom TraceBrain Standard OTLP Trace Schema
|
|
11
|
+
- Robust SDK client with automatic retries and fail-safe design
|
|
12
|
+
|
|
13
|
+
Quick Start:
|
|
14
|
+
# Install
|
|
15
|
+
pip install tracebrain
|
|
16
|
+
|
|
17
|
+
# Start infrastructure with Docker (recommended)
|
|
18
|
+
tracebrain up
|
|
19
|
+
|
|
20
|
+
# Or use Python server directly for development
|
|
21
|
+
tracebrain init-db
|
|
22
|
+
tracebrain start
|
|
23
|
+
|
|
24
|
+
# Use the SDK client in your code
|
|
25
|
+
from tracebrain import TraceClient
|
|
26
|
+
|
|
27
|
+
client = TraceClient()
|
|
28
|
+
success = client.log_trace({
|
|
29
|
+
"trace_id": "abc123",
|
|
30
|
+
"attributes": {"system_prompt": "You are helpful"},
|
|
31
|
+
"spans": [...]
|
|
32
|
+
})
|
|
33
|
+
|
|
34
|
+
Usage:
|
|
35
|
+
# Import the FastAPI app
|
|
36
|
+
from tracebrain import app
|
|
37
|
+
|
|
38
|
+
# Import configuration
|
|
39
|
+
from tracebrain import settings
|
|
40
|
+
|
|
41
|
+
# Import SDK client (recommended)
|
|
42
|
+
from tracebrain import TraceClient
|
|
43
|
+
|
|
44
|
+
# Import TraceStore for programmatic access
|
|
45
|
+
from tracebrain.core.store import TraceStore
|
|
46
|
+
"""
|
|
47
|
+
|
|
48
|
+
__version__ = "1.0.0"
|
|
49
|
+
__author__ = "TraceBrain Team"
|
|
50
|
+
|
|
51
|
+
# Expose main components for easy import
|
|
52
|
+
from .main import app
|
|
53
|
+
from .config import settings
|
|
54
|
+
from .sdk import TraceClient
|
|
55
|
+
from .sdk.client import TraceScope
|
|
56
|
+
|
|
57
|
+
__all__ = [
|
|
58
|
+
"app",
|
|
59
|
+
"settings",
|
|
60
|
+
"TraceClient",
|
|
61
|
+
"TraceScope",
|
|
62
|
+
"__version__",
|
|
63
|
+
]
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
"""AI-related endpoints for v1."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import Any, Dict, List
|
|
6
|
+
import uuid
|
|
7
|
+
|
|
8
|
+
from fastapi import APIRouter, HTTPException
|
|
9
|
+
|
|
10
|
+
from ...evaluators.judge_agent import AIJudge
|
|
11
|
+
from ...core.llm_providers import ProviderError
|
|
12
|
+
from .common import build_ai_evaluation, get_librarian_agent, store
|
|
13
|
+
from .schemas.api_models import (
|
|
14
|
+
AIEvaluationIn,
|
|
15
|
+
AIEvaluationOut,
|
|
16
|
+
ChatHistoryOut,
|
|
17
|
+
NaturalLanguageQuery,
|
|
18
|
+
NaturalLanguageResponse,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
router = APIRouter()
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def _normalize_suggestions(value: Any) -> List[Dict[str, str]]:
|
|
25
|
+
if not isinstance(value, list):
|
|
26
|
+
return []
|
|
27
|
+
|
|
28
|
+
normalized: List[Dict[str, str]] = []
|
|
29
|
+
for item in value:
|
|
30
|
+
if not isinstance(item, dict):
|
|
31
|
+
continue
|
|
32
|
+
label = str(item.get("label", "")).strip()
|
|
33
|
+
suggestion_value = str(item.get("value", "")).strip()
|
|
34
|
+
if label and suggestion_value:
|
|
35
|
+
normalized.append({"label": label, "value": suggestion_value})
|
|
36
|
+
return normalized
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def _normalize_sources(value: Any) -> List[str]:
|
|
40
|
+
normalized: List[str] = []
|
|
41
|
+
|
|
42
|
+
def _append_candidate(candidate: Any) -> None:
|
|
43
|
+
if candidate is None:
|
|
44
|
+
return
|
|
45
|
+
if isinstance(candidate, str):
|
|
46
|
+
text = candidate.strip()
|
|
47
|
+
if text:
|
|
48
|
+
normalized.append(text)
|
|
49
|
+
return
|
|
50
|
+
if isinstance(candidate, dict):
|
|
51
|
+
source_id = candidate.get("trace_id") or candidate.get("id")
|
|
52
|
+
if source_id is not None:
|
|
53
|
+
text = str(source_id).strip()
|
|
54
|
+
if text:
|
|
55
|
+
normalized.append(text)
|
|
56
|
+
return
|
|
57
|
+
|
|
58
|
+
text = str(candidate).strip()
|
|
59
|
+
if text:
|
|
60
|
+
normalized.append(text)
|
|
61
|
+
|
|
62
|
+
if isinstance(value, (list, tuple, set)):
|
|
63
|
+
for item in value:
|
|
64
|
+
_append_candidate(item)
|
|
65
|
+
elif value is not None:
|
|
66
|
+
_append_candidate(value)
|
|
67
|
+
|
|
68
|
+
return list(dict.fromkeys(normalized))
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _normalize_filters(value: Any) -> Dict[str, Any]:
|
|
72
|
+
if not isinstance(value, dict):
|
|
73
|
+
return {}
|
|
74
|
+
return {str(k).strip(): v for k, v in value.items() if k and v is not None}
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
def _build_nlq_response(result: Dict[str, Any], session_id: str) -> NaturalLanguageResponse:
    """Assemble a NaturalLanguageResponse from a raw agent result dict.

    Every field is normalized defensively so a malformed agent payload can
    never break response serialization.
    """
    answer_text = str(result.get("answer", ""))
    error_flag = bool(result.get("is_error", False))
    return NaturalLanguageResponse(
        answer=answer_text,
        session_id=session_id,
        suggestions=_normalize_suggestions(result.get("suggestions")),
        sources=_normalize_sources(result.get("sources")),
        filters=_normalize_filters(result.get("filters")),
        is_error=error_flag,
    )
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
@router.get("/librarian_sessions/{session_id}", response_model=ChatHistoryOut, tags=["AI"])
def get_librarian_session(session_id: str):
    """Return the persisted chat history for a Librarian session.

    Responds 404 when the session has no stored messages and 500 on any
    unexpected storage failure.
    """
    try:
        history = store.get_chat_history(session_id)
        if not history:
            # An unknown session and an empty one are indistinguishable here.
            raise HTTPException(status_code=404, detail="Session not found")
        return ChatHistoryOut(session_id=session_id, messages=history)
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to load session: {str(exc)}")
|
|
101
|
+
|
|
102
|
+
|
|
103
|
+
@router.post("/natural_language_query", response_model=NaturalLanguageResponse, tags=["AI"])
def natural_language_query(query: NaturalLanguageQuery):
    """
    Process a natural language query about traces using the configured LLM provider.

    The provider is selected via settings (LIBRARIAN_MODE/LLM_PROVIDER) and can route to
    API-hosted or open-source backends. The agent uses function calling (when supported)
    to query the TraceStore.
    """
    # Reuse the caller's session when given; otherwise start a fresh one.
    session_id = query.session_id or str(uuid.uuid4())

    try:
        raw = get_librarian_agent().query(query.query, session_id=session_id)
        if isinstance(raw, dict):
            payload = raw
        else:
            # Providers without structured output return a bare answer string.
            payload = {
                "answer": str(raw),
                "suggestions": [],
                "sources": [],
                "filters": {},
            }
        return _build_nlq_response(payload, session_id=session_id)
    except Exception as exc:
        # A ProviderError carries a user-presentable message; anything else
        # gets a generic apology so internals are never leaked to the client.
        if isinstance(exc, ProviderError):
            answer = str(exc)
        else:
            answer = (
                "Sorry, I encountered an error processing your query. "
                "Please try rephrasing your question or check the server logs."
            )
        error_payload = {
            "answer": answer,
            "suggestions": [],
            "sources": [],
            "filters": {},
            "is_error": True,
        }
        return _build_nlq_response(error_payload, session_id=session_id)
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
@router.post("/ai_evaluate/{trace_id}", response_model=AIEvaluationOut, tags=["AI Evaluation"])
def evaluate_trace_with_ai(trace_id: str, payload: AIEvaluationIn):
    """
    Evaluate a trace with a judge model.

    This endpoint is designed as a hook for more complex AI evaluation logic.
    """
    try:
        verdict = AIJudge(store).evaluate(trace_id, payload.judge_model_id)
        evaluation = build_ai_evaluation(verdict)
        # Persist the evaluation alongside the trace before echoing it back.
        store.update_ai_evaluation(trace_id, evaluation)
        return AIEvaluationOut(**evaluation)
    except ValueError as exc:
        # The judge signals a missing trace via ValueError; map it to 404,
        # all other ValueErrors are treated as bad requests.
        message = str(exc)
        status = 404 if "Trace not found" in message else 400
        raise HTTPException(status_code=status, detail=message)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"AI evaluation failed: {exc}")
|
|
@@ -0,0 +1,19 @@
|
|
|
1
|
+
"""Main API router for v1."""
|
|
2
|
+
|
|
3
|
+
from fastapi import APIRouter
|
|
4
|
+
|
|
5
|
+
from .ai_features import router as ai_router
|
|
6
|
+
from .curriculum import router as curriculum_router
|
|
7
|
+
from .episodes import router as episodes_router
|
|
8
|
+
from .operations import router as operations_router
|
|
9
|
+
from .system import router as system_router
|
|
10
|
+
from .traces import router as traces_router
|
|
11
|
+
|
|
12
|
+
router = APIRouter()
|
|
13
|
+
|
|
14
|
+
router.include_router(system_router)
|
|
15
|
+
router.include_router(traces_router)
|
|
16
|
+
router.include_router(episodes_router)
|
|
17
|
+
router.include_router(curriculum_router)
|
|
18
|
+
router.include_router(operations_router)
|
|
19
|
+
router.include_router(ai_router)
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
"""Shared dependencies and helpers for v1 routers."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import Any, Dict
|
|
7
|
+
import logging
|
|
8
|
+
|
|
9
|
+
from ...core.store import TraceStore
|
|
10
|
+
from ...evaluators.judge_agent import AIJudge
|
|
11
|
+
from ...core.librarian import LibrarianAgent
|
|
12
|
+
from ...config import settings
|
|
13
|
+
|
|
14
|
+
logger = logging.getLogger(__name__)
|
|
15
|
+
|
|
16
|
+
store = TraceStore(
|
|
17
|
+
backend=settings.get_backend_type(),
|
|
18
|
+
db_url=settings.DATABASE_URL,
|
|
19
|
+
)
|
|
20
|
+
|
|
21
|
+
_librarian_agent = None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def get_librarian_agent() -> LibrarianAgent:
    """Return the process-wide LibrarianAgent, constructing it on first use.

    Deferred construction avoids paying agent/provider setup cost at import
    time; subsequent calls reuse the cached instance.
    """
    global _librarian_agent
    if _librarian_agent is None:
        _librarian_agent = LibrarianAgent(store)
    return _librarian_agent
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
def build_ai_evaluation(result: Dict[str, Any]) -> Dict[str, Any]:
    """Shape a raw judge result into the AI-evaluation record stored with a trace.

    Args:
        result: Dict produced by the judge; expected keys include ``rating``,
            ``feedback``, ``confidence``, ``error_type`` and ``priority``,
            all optional.

    Returns:
        A flat dict with a derived ``status`` ("auto_verified" when confidence
        exceeds 0.8, else "pending_review") and an ISO-format UTC timestamp.

    Fix over the original: ``float(result.get("confidence", 0.0))`` raised
    ``TypeError`` when the judge returned an explicit ``"confidence": None``;
    a None confidence now falls back to 0.0.
    """
    raw_confidence = result.get("confidence")
    confidence = float(raw_confidence) if raw_confidence is not None else 0.0
    # High-confidence verdicts skip the human review queue.
    status_value = "auto_verified" if confidence > 0.8 else "pending_review"
    return {
        "rating": result.get("rating"),
        "feedback": result.get("feedback"),
        "confidence": confidence,
        "error_type": result.get("error_type", "none"),
        "status": status_value,
        "priority": result.get("priority"),
        "timestamp": datetime.utcnow().isoformat(),
    }
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def run_bg_evaluation(trace_id: str) -> None:
    """Evaluate *trace_id* with the AI judge and persist the result, best-effort.

    Failures are logged rather than raised so callers scheduling this in the
    background are never disrupted.
    """
    try:
        verdict = AIJudge(store).evaluate(trace_id)
        store.update_ai_evaluation(trace_id, build_ai_evaluation(verdict))
    except Exception as exc:
        # Swallow everything: background evaluation must never propagate.
        logger.error("Background evaluation failed for %s: %s", trace_id, exc)
|
|
@@ -0,0 +1,138 @@
|
|
|
1
|
+
"""Curriculum endpoints for v1."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import List
|
|
6
|
+
import json
|
|
7
|
+
|
|
8
|
+
from fastapi import APIRouter, HTTPException, Query
|
|
9
|
+
from fastapi.responses import Response
|
|
10
|
+
|
|
11
|
+
from ...core.curator import CurriculumCurator
|
|
12
|
+
from ...db.base import CurriculumTask
|
|
13
|
+
from .common import store
|
|
14
|
+
from .schemas.api_models import CurriculumTaskOut, GenerateCurriculumRequest
|
|
15
|
+
|
|
16
|
+
router = APIRouter(prefix="/curriculum", tags=["Curriculum"])
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
@router.post("/generate")
def generate_curriculum(request: GenerateCurriculumRequest):
    """Generate curriculum tasks from failed traces.

    Unrecognized error types are ignored (and reported in a warning) rather
    than failing the whole request.
    """
    try:
        curator = CurriculumCurator(store)
        requested = request.error_types or []
        # Partition requested error types into recognized and unrecognized.
        valid = [t for t in requested if t in curator.VALID_ERROR_TYPES]
        invalid = [t for t in requested if t not in curator.VALID_ERROR_TYPES]

        tasks_generated = curator.generate_curriculum(
            error_types=valid or None,
            limit=request.limit,
        )

        result = {"status": "success", "tasks_generated": tasks_generated}
        if invalid:
            result["warning"] = {
                "message": "Some error_types were not recognized and were ignored.",
                "invalid_error_types": invalid,
            }
        return result
    except ValueError as exc:
        raise HTTPException(status_code=400, detail=str(exc))
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to generate curriculum: {str(exc)}")
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
@router.get("", response_model=List[CurriculumTaskOut])
def list_curriculum_tasks():
    """List all curriculum tasks ordered by creation time (newest first)."""
    session = store.get_session()
    try:
        newest_first = session.query(CurriculumTask).order_by(
            CurriculumTask.created_at.desc()
        )
        return newest_first.all()
    finally:
        # Always release the session, even when the query fails.
        session.close()
|
|
60
|
+
|
|
61
|
+
|
|
62
|
+
@router.get("/export")
def export_curriculum(
    format: str = Query("json", description="Export format: 'json' or 'jsonl'"),
):
    """Export pending curriculum tasks for training ingestion.

    Returns a JSON array by default, or newline-delimited JSON when
    ``format=jsonl`` is requested.
    """
    normalized_format = format.lower().strip()
    if normalized_format not in {"json", "jsonl"}:
        raise HTTPException(status_code=400, detail="Invalid format. Use 'json' or 'jsonl'.")

    try:
        pending = store.get_pending_curriculum(limit=100)

        # Shape each task as a chat-style training record.
        records = [
            {
                "id": task["id"],
                "role": "user",
                "content": task["instruction"],
                "metadata": {
                    "difficulty": task["priority"],
                    "focus": "auto_curriculum",
                    "reasoning": task["context"],
                },
            }
            for task in pending
        ]

        if normalized_format == "jsonl":
            body = "\n".join(json.dumps(record) for record in records)
            return Response(content=body, media_type="application/x-jsonlines")
        return records
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
|
95
|
+
|
|
96
|
+
@router.delete("/{task_id}", tags=["Curriculum"])
def delete_curriculum_task(task_id: int):
    """Delete a single curriculum task by ID.

    Responds 204 on success and 404 when no task with that ID exists.
    """
    try:
        if not store.delete_curriculum_task(task_id):
            raise HTTPException(status_code=404, detail="Task not found")
        # 204: deleted successfully, nothing to return.
        return Response(status_code=204)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
|
108
|
+
|
|
109
|
+
@router.delete("", tags=["Curriculum"])
def delete_all_curriculum_tasks():
    """Delete all curriculum tasks, responding 204 on success."""
    try:
        store.delete_all_curriculum_tasks()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return Response(status_code=204)
|
|
117
|
+
|
|
118
|
+
@router.patch("/{task_id}/complete", response_model=CurriculumTaskOut, tags=["Curriculum"])
def mark_curriculum_task_complete(task_id: int):
    """Mark a single curriculum task as complete.

    Responds with the updated task, or 404 when no task with that ID exists.
    """
    try:
        updated = store.mark_curriculum_task_complete(task_id)
        if not updated:
            raise HTTPException(status_code=404, detail="Task not found")
        return updated
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
|
130
|
+
|
|
131
|
+
@router.patch("/complete", tags=["Curriculum"])
def mark_all_curriculum_tasks_complete():
    """Mark all curriculum tasks as complete, responding 204 on success."""
    try:
        store.mark_all_curriculum_tasks_complete()
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    return Response(status_code=204)
|
|
@@ -0,0 +1,154 @@
|
|
|
1
|
+
"""Episode endpoints for v1."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
from typing import List, Optional
|
|
6
|
+
|
|
7
|
+
from fastapi import APIRouter, HTTPException, Query
|
|
8
|
+
|
|
9
|
+
from .common import store
|
|
10
|
+
from .schemas.api_models import (
|
|
11
|
+
EpisodeAggregateOut,
|
|
12
|
+
EpisodeListOut,
|
|
13
|
+
EpisodeOut,
|
|
14
|
+
EpisodeSummaryListOut,
|
|
15
|
+
EpisodeTracesOut,
|
|
16
|
+
TraceSummaryOut,
|
|
17
|
+
trace_to_out,
|
|
18
|
+
)
|
|
19
|
+
|
|
20
|
+
router = APIRouter(prefix="/episodes", tags=["Episodes"])
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
@router.get("", response_model=EpisodeListOut)
def list_episodes(
    skip: int = Query(0, ge=0, description="Number of episodes to skip"),
    limit: int = Query(10, ge=1, le=100, description="Maximum number of episodes to return"),
    query: Optional[str] = Query(None, description="Filter episodes by ID"),
    min_confidence_lt: Optional[float] = Query(
        None,
        ge=0.0,
        le=1.0,
        description="Filter episodes where minimum confidence is below this value",
    ),
):
    """List all episodes ordered by creation time, each with their traces."""
    try:
        episodes, total = store.list_episodes(
            skip=skip,
            limit=limit,
            query=query,
            include_spans=True,
            min_confidence_lt=min_confidence_lt,
        )

        # One EpisodeTracesOut per episode, traces converted to API models.
        grouped = [
            EpisodeTracesOut(
                episode_id=episode_id,
                traces=[trace_to_out(trace) for trace in episode_traces],
            )
            for episode_id, episode_traces in episodes
        ]
        return EpisodeListOut(total=total, skip=skip, limit=limit, episodes=grouped)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to list episodes: {str(exc)}")
|
|
54
|
+
|
|
55
|
+
@router.delete("/{episode_id}")
def delete_episode(episode_id: str):
    """Delete an episode and all its traces.

    NOTE(review): unlike the other endpoints, this returns no body and never
    responds 404 for an unknown episode — presumably store.delete_episode is
    a no-op in that case; confirm this is intended.
    """
    try:
        store.delete_episode(episode_id)
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to delete episode: {str(exc)}")
|
|
62
|
+
|
|
63
|
+
@router.get("/summary", response_model=EpisodeSummaryListOut)
def list_episode_summaries(
    skip: int = Query(0, ge=0, description="Number of episodes to skip"),
    limit: int = Query(10, ge=1, le=100, description="Maximum number of episodes to return"),
    query: Optional[str] = Query(None, description="Filter episodes by ID"),
    min_confidence_lt: Optional[float] = Query(
        None,
        ge=0.0,
        le=1.0,
        description="Filter episodes where minimum confidence is below this value",
    ),
):
    """List episodes with aggregated metrics."""
    try:
        rows, total = store.list_episode_summaries(
            skip=skip,
            limit=limit,
            query=query,
            min_confidence_lt=min_confidence_lt,
        )
        return EpisodeSummaryListOut(
            total=total,
            skip=skip,
            limit=limit,
            episodes=[EpisodeAggregateOut(**row) for row in rows],
        )
    except Exception as exc:
        raise HTTPException(status_code=500, detail=f"Failed to list episode summaries: {str(exc)}")
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def _summarize_trace(trace) -> TraceSummaryOut:
    """Build a per-trace summary (status, duration, span count) for an episode view."""
    spans = trace.spans or []

    starts = [span.start_time for span in spans if span.start_time]
    ends = [span.end_time for span in spans if span.end_time]
    duration_ms = 0.0
    if starts and ends:
        # Wall-clock extent of the whole trace, not the sum of span durations.
        duration_ms = (max(ends) - min(starts)).total_seconds() * 1000

    status = "OK"
    for span in spans:
        span_type = (span.attributes or {}).get("tracebrain.span.type")
        if "error" in (span.name or "").lower() or span_type == "tool_error":
            status = "ERROR"
            break

    return TraceSummaryOut(
        trace_id=trace.id,
        status=status,
        duration_ms=round(duration_ms, 2),
        span_count=len(spans),
        created_at=trace.created_at,
    )


@router.get("/{episode_id}", response_model=EpisodeOut)
def get_episode_details(episode_id: str):
    """Get episode details including the list of traces in that episode."""
    try:
        traces_in_episode = store.get_traces_by_episode_id(episode_id)
        if not traces_in_episode:
            raise HTTPException(status_code=404, detail="Episode not found")

        summaries = [_summarize_trace(trace) for trace in traces_in_episode]
        return EpisodeOut(episode_id=episode_id, traces=summaries)
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
@router.get("/{episode_id}/traces", response_model=EpisodeTracesOut)
def get_episode_traces(episode_id: str):
    """Get all traces related to an episode."""
    try:
        members = store.get_traces_by_episode_id(episode_id)
        if not members:
            raise HTTPException(status_code=404, detail="Episode not found")

        # Re-fetch by ID so spans are eagerly included in the payload.
        hydrated = store.get_traces_by_ids([trace.id for trace in members], include_spans=True)
        return EpisodeTracesOut(
            episode_id=episode_id,
            traces=[trace_to_out(trace) for trace in hydrated],
        )
    except HTTPException:
        raise
    except Exception as exc:
        raise HTTPException(status_code=500, detail=str(exc))