tracebrain 1.0.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- tracebrain-1.0.0/PKG-INFO +793 -0
- tracebrain-1.0.0/README.md +740 -0
- tracebrain-1.0.0/pyproject.toml +115 -0
- tracebrain-1.0.0/setup.cfg +4 -0
- tracebrain-1.0.0/src/tracebrain/__init__.py +63 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/__init__.py +9 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/ai_features.py +168 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/api_router.py +19 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/common.py +53 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/curriculum.py +138 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/episodes.py +154 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/operations.py +115 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/schemas/__init__.py +77 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/schemas/api_models.py +414 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/system.py +201 -0
- tracebrain-1.0.0/src/tracebrain/api/v1/traces.py +387 -0
- tracebrain-1.0.0/src/tracebrain/cli.py +655 -0
- tracebrain-1.0.0/src/tracebrain/config.py +224 -0
- tracebrain-1.0.0/src/tracebrain/core/__init__.py +0 -0
- tracebrain-1.0.0/src/tracebrain/core/curator.py +301 -0
- tracebrain-1.0.0/src/tracebrain/core/librarian.py +704 -0
- tracebrain-1.0.0/src/tracebrain/core/llm_providers.py +1147 -0
- tracebrain-1.0.0/src/tracebrain/core/schema.py +121 -0
- tracebrain-1.0.0/src/tracebrain/core/seeder.py +68 -0
- tracebrain-1.0.0/src/tracebrain/core/services/__init__.py +1 -0
- tracebrain-1.0.0/src/tracebrain/core/services/embedding.py +129 -0
- tracebrain-1.0.0/src/tracebrain/core/store.py +1773 -0
- tracebrain-1.0.0/src/tracebrain/db/__init__.py +0 -0
- tracebrain-1.0.0/src/tracebrain/db/base.py +400 -0
- tracebrain-1.0.0/src/tracebrain/db/session.py +132 -0
- tracebrain-1.0.0/src/tracebrain/evaluators/__init__.py +1 -0
- tracebrain-1.0.0/src/tracebrain/evaluators/judge_agent.py +270 -0
- tracebrain-1.0.0/src/tracebrain/main.py +268 -0
- tracebrain-1.0.0/src/tracebrain/resources/docker/Dockerfile +54 -0
- tracebrain-1.0.0/src/tracebrain/resources/docker/README.md +132 -0
- tracebrain-1.0.0/src/tracebrain/resources/docker/docker-compose.yml +93 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_10_partial_failure.json +118 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_11_episode_group_attempt_1.json +71 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_12_episode_group_attempt_2.json +69 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_13_governance_status.json +53 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_14_failed_status.json +55 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_15_hallucination.json +64 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_16_format_error.json +35 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_17_context_overflow.json +35 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_18_invalid_arguments.json +66 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_19_multi_agent_interaction.json +52 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_1_simple_success.json +72 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_20_experience_retrieval.json +65 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_2_complex_multistep.json +102 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_3_tool_error.json +72 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_4_self_correction.json +102 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_5_multi_tool_orchestration.json +102 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_6_no_tool_call.json +38 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_7_parallel_calls.json +82 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_8_clarifying_question.json +38 -0
- tracebrain-1.0.0/src/tracebrain/resources/samples/sample_9_looping_behavior.json +135 -0
- tracebrain-1.0.0/src/tracebrain/sdk/__init__.py +19 -0
- tracebrain-1.0.0/src/tracebrain/sdk/agent_tools.py +111 -0
- tracebrain-1.0.0/src/tracebrain/sdk/client.py +785 -0
- tracebrain-1.0.0/src/tracebrain/sdk/trace_context.py +20 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/chat-dark-bg-BmOTGz3x.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/chat-light-bg-DwNPDG7g.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/dark-owl-CATNyvf8.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/index-B6hMk-_K.js +286 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/index-CXBZvQ1E.css +1 -0
- tracebrain-1.0.0/src/tracebrain/static/assets/light-owl-CAs_QdDB.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/chat-dark-bg.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/chat-light-bg.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/favicon-dark.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/favicon-light.png +0 -0
- tracebrain-1.0.0/src/tracebrain/static/index.html +16 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/PKG-INFO +793 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/SOURCES.txt +75 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/dependency_links.txt +1 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/entry_points.txt +2 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/requires.txt +37 -0
- tracebrain-1.0.0/src/tracebrain.egg-info/top_level.txt +1 -0
|
@@ -0,0 +1,793 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: tracebrain
|
|
3
|
+
Version: 1.0.0
|
|
4
|
+
Summary: A standalone trace management platform for observability and continuous improvement of LLM-based agents.
|
|
5
|
+
Author: TraceBrain Team
|
|
6
|
+
License: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/ToolBrain/TraceBrain
|
|
8
|
+
Project-URL: Repository, https://github.com/ToolBrain/TraceBrain
|
|
9
|
+
Project-URL: Issues, https://github.com/ToolBrain/TraceBrain/issues
|
|
10
|
+
Keywords: llm,llm-agents,agent-observability,tracing,trace-management,opentelemetry,langchain,agent-evaluation,debugging,monitoring
|
|
11
|
+
Classifier: Development Status :: 4 - Beta
|
|
12
|
+
Classifier: Intended Audience :: Developers
|
|
13
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
14
|
+
Classifier: Programming Language :: Python :: 3
|
|
15
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
16
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
17
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
18
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
19
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
20
|
+
Requires-Python: >=3.8
|
|
21
|
+
Description-Content-Type: text/markdown
|
|
22
|
+
Requires-Dist: sqlalchemy<3,>=2.0
|
|
23
|
+
Requires-Dist: psycopg2-binary>=2.9
|
|
24
|
+
Requires-Dist: fastapi<1,>=0.104
|
|
25
|
+
Requires-Dist: uvicorn[standard]<1,>=0.24
|
|
26
|
+
Requires-Dist: pydantic<3,>=2.0
|
|
27
|
+
Requires-Dist: pydantic-settings<3,>=2.0
|
|
28
|
+
Requires-Dist: typer<1,>=0.24
|
|
29
|
+
Requires-Dist: requests<3,>=2.31
|
|
30
|
+
Requires-Dist: sqlparse>=0.5
|
|
31
|
+
Requires-Dist: pgvector>=0.2
|
|
32
|
+
Requires-Dist: google-genai<2,>=1.60
|
|
33
|
+
Requires-Dist: python-dotenv>=1.0.0
|
|
34
|
+
Requires-Dist: platformdirs>=4.0
|
|
35
|
+
Provides-Extra: openai
|
|
36
|
+
Requires-Dist: openai<3,>=2.26; extra == "openai"
|
|
37
|
+
Provides-Extra: anthropic
|
|
38
|
+
Requires-Dist: anthropic<1,>=0.34; extra == "anthropic"
|
|
39
|
+
Provides-Extra: huggingface
|
|
40
|
+
Requires-Dist: huggingface_hub<2,>=1.0; extra == "huggingface"
|
|
41
|
+
Provides-Extra: all-llms
|
|
42
|
+
Requires-Dist: openai<3,>=2.26; extra == "all-llms"
|
|
43
|
+
Requires-Dist: anthropic<1,>=0.34; extra == "all-llms"
|
|
44
|
+
Requires-Dist: huggingface_hub<2,>=1.0; extra == "all-llms"
|
|
45
|
+
Provides-Extra: embeddings-local
|
|
46
|
+
Requires-Dist: sentence-transformers>=2.7.0; extra == "embeddings-local"
|
|
47
|
+
Provides-Extra: dev
|
|
48
|
+
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
|
49
|
+
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
|
50
|
+
Requires-Dist: black>=23.0.0; extra == "dev"
|
|
51
|
+
Requires-Dist: isort>=5.12.0; extra == "dev"
|
|
52
|
+
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
|
53
|
+
|
|
54
|
+
# TraceBrain: An Open-Source Framework for Agentic Trace Management 🧠🚀
|
|
55
|
+
|
|
56
|
+
<p align="center">
|
|
57
|
+
<picture>
|
|
58
|
+
<source media="(prefers-color-scheme: dark)" srcset="images/banner-dark.png">
|
|
59
|
+
<source media="(prefers-color-scheme: light)" srcset="images/banner-light.png">
|
|
60
|
+
<img alt="TraceBrain Banner" src="images/banner-light.png" width="100%">
|
|
61
|
+
</picture>
|
|
62
|
+
</p>
|
|
63
|
+
|
|
64
|
+
<p align="center">
|
|
65
|
+
<img src="https://img.shields.io/badge/Release-v1.0.0-blue" alt="Release">
|
|
66
|
+
<img src="https://img.shields.io/badge/License-MIT-green" alt="License">
|
|
67
|
+
</p>
|
|
68
|
+
|
|
69
|
+
**TraceBrain** is an open-source platform for collecting, managing, and analyzing execution traces from LLM agents.
|
|
70
|
+
|
|
71
|
+
The system standardizes heterogeneous agent logs into a unified trace format, enabling consistent inspection, evaluation, and downstream analysis across different frameworks.
|
|
72
|
+
|
|
73
|
+
By organizing historical traces as structured artifacts, TraceBrain supports agent observability, human oversight, and iterative improvement of agent workflows.
|
|
74
|
+
|
|
75
|
+
## ✨ Key Features
|
|
76
|
+
|
|
77
|
+
### 📥 Ingestion Layer (Standardization)
|
|
78
|
+
- **Standardized Trace Format**: Capture agent workflows using a unified OTLP-based schema.
|
|
79
|
+
- **Framework-Agnostic Integration**: Lightweight SDK and converters support agents built with various frameworks (e.g., LangChain, SmolAgents) or custom implementations.
|
|
80
|
+
- **Delta-based Tracing**: Stores only incremental context updates (`new_content`) to reduce redundant prompt storage.
|
|
81
|
+
|
|
82
|
+
### 🛡️ Governance Layer (Human-in-the-loop)
|
|
83
|
+
- **Active Help Request**: Allows agents to escalate uncertain decisions to human experts during execution.
|
|
84
|
+
- **Command Center UI**: Visualize multi-step agent traces and enable expert inspection and feedback.
|
|
85
|
+
- **Semi-Automated Evaluation**: An AI Judge generates draft evaluations (e.g., `rating`, `confidence`, `error_type`, and `feedback`) that experts can review and finalize.
|
|
86
|
+
|
|
87
|
+
### 🧠 Cognitive Layer (Trace-driven Learning)
|
|
88
|
+
- **Experience Retrieval**: Agents can query past successful trajectories to guide reasoning via in-context learning.
|
|
89
|
+
- **Automated Curriculum Generation**: Using error classifications produced by the AI Judge, a Curator agent analyzes clustered failure traces and synthesizes targeted training tasks.
|
|
90
|
+
- **Semantic Trace Search**: Vector-based retrieval (via `pgvector`) for locating similar reasoning trajectories.
|
|
91
|
+
|
|
92
|
+
## 🏗️ Architecture
|
|
93
|
+
|
|
94
|
+

|
|
95
|
+
|
|
96
|
+
- **Your AI Agent:** Any agent framework. Uses the TraceClient SDK to send data.
|
|
97
|
+
- **TraceStore API:** The central FastAPI server. Ingests, stores, and serves trace data.
|
|
98
|
+
- **Database:** The persistence layer (PostgreSQL or SQLite).
|
|
99
|
+
- **Admin Panel UI:** A React client in `web/` that consumes the TraceStore API.
|
|
100
|
+
|
|
101
|
+
**Tech Stack:**
|
|
102
|
+
- **Backend**: FastAPI, SQLAlchemy 2.0, Pydantic V2
|
|
103
|
+
- **Database**: PostgreSQL (production), SQLite (development), pgvector (semantic search)
|
|
104
|
+
- **Frontend**: React (Vite + MUI) in `web/`
|
|
105
|
+
- **Deployment**: Docker Compose
|
|
106
|
+
- **AI Integration**: LibrarianAgent + AI Judge + Curriculum Curator with multi-provider LLM support
|
|
107
|
+
- **Embeddings**: sentence-transformers (local) or OpenAI/Gemini (cloud)
|
|
108
|
+
|
|
109
|
+
## 📸 Platform Showcase
|
|
110
|
+
|
|
111
|
+
Take a look at the TraceBrain Command Center in action:
|
|
112
|
+
|
|
113
|
+
<p align="center">
|
|
114
|
+
<b>🌐 Welcome to the Command Center</b><br>
|
|
115
|
+
<i>The central hub for agentic trace management, featuring a clean, intuitive, and modern interface.</i><br>
|
|
116
|
+
<img src="images/homepage.jpg" alt="TraceBrain Homepage" width="100%">
|
|
117
|
+
</p>
|
|
118
|
+
|
|
119
|
+
<table>
|
|
120
|
+
<tr>
|
|
121
|
+
<td width="50%">
|
|
122
|
+
<b>📊 Command Center Dashboard</b><br>
|
|
123
|
+
<i>Real-time error distribution, confidence metrics, and active filters.</i><br>
|
|
124
|
+
<img src="images/dashboard_analytics.jpg" alt="Dashboard" style="width:100%; height:auto; border-radius:12px;">
|
|
125
|
+
</td>
|
|
126
|
+
<td width="50%">
|
|
127
|
+
<b>🔍 Trace Explorer & AI Judge</b><br>
|
|
128
|
+
<i>Side-by-side view of the execution tree, span properties, and Human-AI collaborative labeling.</i><br>
|
|
129
|
+
<img src="images/trace_explorer.jpg" alt="Trace Explorer" style="width:100%; height:auto; border-radius:12px;">
|
|
130
|
+
</td>
|
|
131
|
+
</tr>
|
|
132
|
+
<tr>
|
|
133
|
+
<td width="50%">
|
|
134
|
+
<b>🤖 AI Librarian</b><br>
|
|
135
|
+
<i>Query your trace database using natural language and intent-based UI filters.</i><br>
|
|
136
|
+
<img src="images/ai_librarian.jpg" alt="AI Librarian" style="width:100%; height:auto; border-radius:12px;">
|
|
137
|
+
</td>
|
|
138
|
+
<td width="50%">
|
|
139
|
+
<b>🗺️ Automated Curriculum</b><br>
|
|
140
|
+
<i>Transform diagnosed failures into targeted training tasks ready for export.</i><br>
|
|
141
|
+
<img src="images/training_roadmap.jpg" alt="Training Roadmap" style="width:100%; height:auto; border-radius:12px;">
|
|
142
|
+
</td>
|
|
143
|
+
</tr>
|
|
144
|
+
</table>
|
|
145
|
+
|
|
146
|
+
---
|
|
147
|
+
|
|
148
|
+
## 🚀 Quick Start
|
|
149
|
+
|
|
150
|
+
Choose one of three installation paths based on your needs. Each option ends with the
|
|
151
|
+
same user experience: a unified UI + API at http://localhost:8000.
|
|
152
|
+
|
|
153
|
+
### Option 1: Docker (Recommended)
|
|
154
|
+
|
|
155
|
+
This is the default path for most users. It automatically provisions a production-ready
|
|
156
|
+
PostgreSQL + pgvector environment. Option 1 uses pre-built images from Docker Hub.
|
|
157
|
+
|
|
158
|
+
1. **Install the CLI**
|
|
159
|
+
```bash
|
|
160
|
+
pip install tracebrain
|
|
161
|
+
```
|
|
162
|
+
|
|
163
|
+
2. **Initialize**
|
|
164
|
+
```bash
|
|
165
|
+
tracebrain init
|
|
166
|
+
```
|
|
167
|
+
This creates a template `.env` file for API keys and configuration.
|
|
168
|
+
|
|
169
|
+
Open the `.env` file and add your API keys before continuing. If you skip this step,
|
|
170
|
+
the containers will start but AI features (Librarian, Judge) will fail.
|
|
171
|
+
|
|
172
|
+
3. **Start the platform**
|
|
173
|
+
```bash
|
|
174
|
+
tracebrain up
|
|
175
|
+
```
|
|
176
|
+
|
|
177
|
+
**Access:** http://localhost:8000 (UI + API)
|
|
178
|
+
|
|
179
|
+
Note: Option 1 uses pre-built images from Docker Hub, so you don't need Node.js or local build tools.
|
|
180
|
+
|
|
181
|
+
If you use Docker, you only need `pip install tracebrain` to get the CLI. All LLM and embedding
|
|
182
|
+
dependencies are already bundled in the Docker image, so you do not install them on your host machine.
|
|
183
|
+
|
|
184
|
+
### Option 2: Local with SQLite (Portable Mode)
|
|
185
|
+
|
|
186
|
+
Best for fast evaluation without Docker.
|
|
187
|
+
|
|
188
|
+
1. **Install the CLI**
|
|
189
|
+
```bash
|
|
190
|
+
pip install tracebrain
|
|
191
|
+
```
|
|
192
|
+
|
|
193
|
+
2. **Initialize**
|
|
194
|
+
```bash
|
|
195
|
+
tracebrain init
|
|
196
|
+
```
|
|
197
|
+
|
|
198
|
+
3. **Create local DB**
|
|
199
|
+
```bash
|
|
200
|
+
tracebrain init-db
|
|
201
|
+
```
|
|
202
|
+
This creates a local SQLite file and prepares tables.
|
|
203
|
+
|
|
204
|
+
4. **Launch**
|
|
205
|
+
```bash
|
|
206
|
+
tracebrain start
|
|
207
|
+
```
|
|
208
|
+
|
|
209
|
+
**Access:** http://localhost:8000 (UI + API)
|
|
210
|
+
|
|
211
|
+
**Technical note:** the Python backend serves the bundled React build from its internal
|
|
212
|
+
static directory, so no separate frontend build step is required.
|
|
213
|
+
|
|
214
|
+
If you run locally without Docker and want to keep the environment light, install the core package
|
|
215
|
+
first (`pip install tracebrain`). When you need a specific provider, add only that extra (for example
|
|
216
|
+
`pip install "tracebrain[openai]"`; the quotes prevent shells like zsh from treating the brackets as a glob pattern).
|
|
217
|
+
|
|
218
|
+
### Option 3: Development Setup (Contributor Mode)
|
|
219
|
+
|
|
220
|
+
For contributors who plan to modify TraceBrain source code.
|
|
221
|
+
|
|
222
|
+
1. **Clone the repository**
|
|
223
|
+
```bash
|
|
224
|
+
git clone https://github.com/ToolBrain/TraceBrain.git
|
|
225
|
+
cd TraceBrain
|
|
226
|
+
```
|
|
227
|
+
|
|
228
|
+
2. **Backend (editable install)**
|
|
229
|
+
```bash
|
|
230
|
+
pip install -e .
|
|
231
|
+
tracebrain start
|
|
232
|
+
```
|
|
233
|
+
|
|
234
|
+
3. **Frontend (HMR)**
|
|
235
|
+
```bash
|
|
236
|
+
cd web
|
|
237
|
+
npm install
|
|
238
|
+
npm run dev
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
**Access:**
|
|
242
|
+
- Frontend: http://localhost:5173 (Hot Module Replacement)
|
|
243
|
+
- API: http://localhost:8000
|
|
244
|
+
|
|
245
|
+
## 📦 Installation
|
|
246
|
+
|
|
247
|
+
TraceBrain supports optional extras to minimize dependencies. Install only what you need.
|
|
248
|
+
|
|
249
|
+
```bash
|
|
250
|
+
pip install tracebrain
|
|
251
|
+
|
|
252
|
+
# Optional extras
|
|
253
|
+
pip install "tracebrain[embeddings-local]" # local embeddings (quotes avoid shell globbing, e.g. in zsh)
|
|
254
|
+
pip install "tracebrain[openai]" # OpenAI provider
|
|
255
|
+
pip install "tracebrain[anthropic]" # Anthropic provider
|
|
256
|
+
pip install "tracebrain[huggingface]" # Hugging Face provider SDK
|
|
257
|
+
pip install "tracebrain[all-llms]" # OpenAI + Anthropic + Hugging Face
|
|
258
|
+
```
|
|
259
|
+
|
|
260
|
+
## 📖 Usage
|
|
261
|
+
|
|
262
|
+
### CLI Commands
|
|
263
|
+
|
|
264
|
+
| Command | Description |
|
|
265
|
+
| --- | --- |
|
|
266
|
+
| `tracebrain init` | Create a template `.env` file in the current directory. |
|
|
267
|
+
| `tracebrain init-db` | Initialize a local SQLite database. |
|
|
268
|
+
| `tracebrain up` | Launch Docker-based infrastructure. |
|
|
269
|
+
| `tracebrain start` | Run the standalone FastAPI server. |
|
|
270
|
+
|
|
271
|
+
### API Endpoints
|
|
272
|
+
|
|
273
|
+
## Concepts
|
|
274
|
+
|
|
275
|
+
- **Trace**: A single execution attempt (an "experiment").
|
|
276
|
+
- **Episode**: A logical group of traces (attempts) aimed at solving a single user task.
|
|
277
|
+
|
|
278
|
+
**Traces**
|
|
279
|
+
- `POST /api/v1/traces` - Create a new trace
|
|
280
|
+
- `POST /api/v1/traces/init` - Initialize a trace before spans are available
|
|
281
|
+
- `GET /api/v1/traces` - List all traces
|
|
282
|
+
- `GET /api/v1/traces/{trace_id}` - Get trace details
|
|
283
|
+
- `POST /api/v1/traces/{trace_id}/feedback` - Add feedback to a trace
|
|
284
|
+
- `GET /api/v1/export/traces` - Export raw OTLP traces as JSONL (supports status, min_rating, error_type, min_confidence, max_confidence, start_time, end_time)
|
|
285
|
+
|
|
286
|
+
**Episodes**
|
|
287
|
+
- `GET /api/v1/episodes` - List all episodes along with their full traces
|
|
288
|
+
- `GET /api/v1/episodes/summary` - List episodes with aggregated metrics
|
|
289
|
+
- `GET /api/v1/episodes/{episode_id}` - Get episode details with trace summaries
|
|
290
|
+
- `GET /api/v1/episodes/{episode_id}/traces` - Get all full traces in an episode
|
|
291
|
+
|
|
292
|
+
**Analytics**
|
|
293
|
+
- `GET /api/v1/stats` - Get overall statistics
|
|
294
|
+
- `GET /api/v1/analytics/tool_usage` - Get tool usage analytics
|
|
295
|
+
|
|
296
|
+
**Natural Language Queries**
|
|
297
|
+
- `POST /api/v1/natural_language_query` - Query traces with natural language
|
|
298
|
+
- Uses Librarian provider/model from Settings (stored in DB)
|
|
299
|
+
- Requires the matching provider API key in environment (`{PROVIDER}_API_KEY`)
|
|
300
|
+
- Supports `session_id` for chat memory and returns `suggestions`
|
|
301
|
+
- `GET /api/v1/librarian_sessions/{session_id}` - Load stored chat history
|
|
302
|
+
|
|
303
|
+
**AI Evaluation**
|
|
304
|
+
- `POST /api/v1/ai_evaluate/{trace_id}` - Evaluate a trace using the configured Judge provider/model
|
|
305
|
+
- `POST /api/v1/ops/batch_evaluate` - Run AI judge over recent traces missing `tracebrain.ai_evaluation`
|
|
306
|
+
- `POST /api/v1/traces` triggers background evaluation when no AI draft exists
|
|
307
|
+
|
|
308
|
+
**Operations**
|
|
309
|
+
- `DELETE /api/v1/ops/traces/cleanup` - Delete traces that match cleanup filters
|
|
310
|
+
|
|
311
|
+
**Semantic Search**
|
|
312
|
+
- `GET /api/v1/traces/search` - Find similar traces using vector similarity
|
|
313
|
+
|
|
314
|
+
**Governance Signals**
|
|
315
|
+
- `POST /api/v1/traces/{trace_id}/signal` - Update trace status/priority
|
|
316
|
+
|
|
317
|
+
**Curriculum**
|
|
318
|
+
- `POST /api/v1/curriculum/generate` - Generate tasks from failed/low-rated traces using configured Curator provider/model
|
|
319
|
+
- `GET /api/v1/curriculum` - List pending curriculum tasks
|
|
320
|
+
- `GET /api/v1/curriculum/export` - Export curriculum tasks as JSONL
|
|
321
|
+
- `DELETE /api/v1/curriculum/{task_id}` - Delete a curriculum task
|
|
322
|
+
- `DELETE /api/v1/curriculum` - Delete all curriculum tasks
|
|
323
|
+
- `PATCH /api/v1/curriculum/{task_id}/complete` - Mark a curriculum task as complete
|
|
324
|
+
- `PATCH /api/v1/curriculum/complete` - Mark all curriculum tasks as complete
|
|
325
|
+
|
|
326
|
+
**History**
|
|
327
|
+
- `GET /api/v1/history` - Retrieve history of viewed traces and episodes
|
|
328
|
+
- `POST /api/v1/history` - Add or update last time trace or episode was viewed
|
|
329
|
+
- `DELETE /api/v1/history` - Clear all traces and episodes in viewed history
|
|
330
|
+
|
|
331
|
+
**Settings**
|
|
332
|
+
- `GET /api/v1/settings` - Retrieve current LLM routing settings
|
|
333
|
+
- `POST /api/v1/settings` - Update LLM routing + provider API keys (`librarian_*`, `judge_*`, `curator_*`, `*_api_key`)
|
|
334
|
+
|
|
335
|
+
### Trace Status and Needs Review
|
|
336
|
+
|
|
337
|
+
Trace status is stored in both the database column `status` and in
|
|
338
|
+
`attributes.tracebrain.trace.status` for UI and API consistency.
|
|
339
|
+
|
|
340
|
+
**Supported statuses:**
|
|
341
|
+
|
|
342
|
+
- `running` - Trace is in progress or not finalized.
|
|
343
|
+
- `completed` - Trace has been reviewed and finalized.
|
|
344
|
+
- `needs_review` - Trace requires human attention.
|
|
345
|
+
- `failed` - Trace is marked as failed.
|
|
346
|
+
|
|
347
|
+
**When `needs_review` is set:**
|
|
348
|
+
|
|
349
|
+
- **Agent Signal:** The agent calls `request_human_intervention` (Active Help Request).
|
|
350
|
+
- **AI Judgment:** `tracebrain.ai_evaluation.confidence` < 0.75, or
|
|
351
|
+
`tracebrain.ai_evaluation.error_type` is one of:
|
|
352
|
+
`logic_loop`, `hallucination`, `invalid_tool_usage`, `tool_execution_error`,
|
|
353
|
+
`format_error`, `misinterpretation`, `context_overflow`.
|
|
354
|
+
- **System Error:** Any span has `otel.status_code` = `ERROR`.
|
|
355
|
+
|
|
356
|
+
### Configuration (Settings + Provider Keys)
|
|
357
|
+
|
|
358
|
+
TraceBrain separates configuration into two layers:
|
|
359
|
+
|
|
360
|
+
- **Runtime routing settings (DB-backed):** provider/model for Librarian, Judge, Curator.
|
|
361
|
+
- **Secrets and infra flags (env):** provider API keys, embedding config, debug flags.
|
|
362
|
+
|
|
363
|
+
Runtime settings are editable from the UI or `POST /api/v1/settings`, and are persisted in the database.
|
|
364
|
+
On first startup (when DB settings row does not exist), values are bootstrapped from `DEFAULT_*` env variables.
|
|
365
|
+
|
|
366
|
+
#### 1) Provider API keys (environment variables)
|
|
367
|
+
|
|
368
|
+
Use provider-specific key names only:
|
|
369
|
+
|
|
370
|
+
```bash
|
|
371
|
+
OPENAI_API_KEY=your_openai_api_key_here
|
|
372
|
+
GEMINI_API_KEY=your_gemini_api_key_here
|
|
373
|
+
# ANTHROPIC_API_KEY=your_anthropic_api_key_here
|
|
374
|
+
# HUGGINGFACE_API_KEY=your_huggingface_api_key_here
|
|
375
|
+
```
|
|
376
|
+
|
|
377
|
+
Optional provider base URLs:
|
|
378
|
+
|
|
379
|
+
```bash
|
|
380
|
+
# Optional: custom endpoints/proxies
|
|
381
|
+
# OPENAI_BASE_URL=https://your-openai-compatible-endpoint/v1
|
|
382
|
+
# ANTHROPIC_BASE_URL=https://your-anthropic-endpoint
|
|
383
|
+
# HUGGINGFACE_BASE_URL=https://your-huggingface-endpoint
|
|
384
|
+
```
|
|
385
|
+
|
|
386
|
+
**Hugging Face local inference (vLLM/TGI):**
|
|
387
|
+
|
|
388
|
+
If you run a local inference server (vLLM or TGI), set `HUGGINGFACE_BASE_URL` to your server URL.
|
|
389
|
+
When this is set, TraceBrain routes Hugging Face traffic to your local endpoint instead of the
|
|
390
|
+
Hugging Face cloud API.
|
|
391
|
+
|
|
392
|
+
```bash
|
|
393
|
+
# Example: local vLLM/TGI endpoint
|
|
394
|
+
HUGGINGFACE_BASE_URL=http://localhost:8001  # use a port other than 8000 if TraceBrain itself is running there
|
|
395
|
+
HUGGINGFACE_API_KEY=your_token_if_required
|
|
396
|
+
```
|
|
397
|
+
|
|
398
|
+
#### 2) Bootstrap defaults for first run (environment variables)
|
|
399
|
+
|
|
400
|
+
These defaults are used only when settings are not yet stored in DB:
|
|
401
|
+
|
|
402
|
+
```bash
|
|
403
|
+
DEFAULT_LIBRARIAN_PROVIDER=openai
|
|
404
|
+
DEFAULT_LIBRARIAN_MODEL=gpt-4o-mini
|
|
405
|
+
|
|
406
|
+
DEFAULT_JUDGE_PROVIDER=gemini
|
|
407
|
+
DEFAULT_JUDGE_MODEL=gemini-2.5-flash
|
|
408
|
+
|
|
409
|
+
DEFAULT_CURATOR_PROVIDER=gemini
|
|
410
|
+
DEFAULT_CURATOR_MODEL=gemini-2.5-flash
|
|
411
|
+
```
|
|
412
|
+
|
|
413
|
+
#### 3) Global flags and embedding configuration
|
|
414
|
+
|
|
415
|
+
```bash
|
|
416
|
+
LLM_DEBUG=false
|
|
417
|
+
|
|
418
|
+
EMBEDDING_PROVIDER=local
|
|
419
|
+
EMBEDDING_MODEL=all-MiniLM-L6-v2
|
|
420
|
+
|
|
421
|
+
# For cloud embeddings
|
|
422
|
+
# EMBEDDING_API_KEY=your_embedding_key
|
|
423
|
+
# EMBEDDING_BASE_URL=https://your-embedding-endpoint/v1
|
|
424
|
+
```
|
|
425
|
+
|
|
426
|
+
#### Settings API payload
|
|
427
|
+
|
|
428
|
+
`GET /api/v1/settings` and `POST /api/v1/settings` use this shape:
|
|
429
|
+
|
|
430
|
+
```json
|
|
431
|
+
{
|
|
432
|
+
"librarian_provider": "openai",
|
|
433
|
+
"librarian_model": "gpt-4o-mini",
|
|
434
|
+
"judge_provider": "gemini",
|
|
435
|
+
"judge_model": "gemini-2.5-flash",
|
|
436
|
+
"curator_provider": "gemini",
|
|
437
|
+
"curator_model": "gemini-2.5-flash",
|
|
438
|
+
"openai_api_key": "sk-...abcd",
|
|
439
|
+
"gemini_api_key": "AIza...wxyz",
|
|
440
|
+
"anthropic_api_key": null,
|
|
441
|
+
"huggingface_api_key": null
|
|
442
|
+
}
|
|
443
|
+
```
|
|
444
|
+
|
|
445
|
+
Notes:
|
|
446
|
+
- `GET /api/v1/settings` returns masked API keys for safety.
|
|
447
|
+
- `POST /api/v1/settings` accepts plain-text API keys when you want to add or rotate keys.
|
|
448
|
+
- If a DB key is empty, TraceBrain falls back to the corresponding environment variable (`OPENAI_API_KEY`, `GEMINI_API_KEY`, `ANTHROPIC_API_KEY`, `HUGGINGFACE_API_KEY`).
|
|
449
|
+
|
|
450
|
+
**Example API Usage:**
|
|
451
|
+
|
|
452
|
+
```python
|
|
453
|
+
import requests
|
|
454
|
+
|
|
455
|
+
# Create a trace
|
|
456
|
+
response = requests.post("http://localhost:8000/api/v1/traces", json={
|
|
457
|
+
"trace_id": "trace-001",
|
|
458
|
+
"spans": [
|
|
459
|
+
{
|
|
460
|
+
"span_id": "span-001",
|
|
461
|
+
"trace_id": "trace-001",
|
|
462
|
+
"name": "User Request",
|
|
463
|
+
"start_time": "2024-01-01T10:00:00Z",
|
|
464
|
+
"end_time": "2024-01-01T10:00:05Z",
|
|
465
|
+
"attributes": {
|
|
466
|
+
"tracebrain.span.type": "user_request",
|
|
467
|
+
"tracebrain.content.new_content": "What's the stock price of NVIDIA?"
|
|
468
|
+
}
|
|
469
|
+
}
|
|
470
|
+
]
|
|
471
|
+
})
|
|
472
|
+
|
|
473
|
+
# Add feedback
|
|
474
|
+
requests.post("http://localhost:8000/api/v1/traces/trace-001/feedback", json={
|
|
475
|
+
"rating": 5,
|
|
476
|
+
"tags": ["accurate", "fast"],
|
|
477
|
+
"comment": "Great response!",
|
|
478
|
+
"metadata": {
|
|
479
|
+
"outcome": "success",
|
|
480
|
+
"efficiency_score": 0.95
|
|
481
|
+
}
|
|
482
|
+
})
|
|
483
|
+
```
|
|
484
|
+
|
|
485
|
+
### React Frontend
|
|
486
|
+
|
|
487
|
+
The admin UI provides:
|
|
488
|
+
- **Trace Browser**: View all traces with filters
|
|
489
|
+
- **Trace Details**: Expandable span tree visualization and compare related traces
|
|
490
|
+
- **Feedback Form**: Rate and tag traces
|
|
491
|
+
- **Analytics Dashboard**: Stats, tool usage charts
|
|
492
|
+
- **AI Librarian**: Session-aware chat with suggestions and history restore
|
|
493
|
+
- **AI Evaluation**: AI draft is auto-generated and experts verify or edit before finalizing
|
|
494
|
+
- **Governance Signal**: Mark traces with status and priority
|
|
495
|
+
- **Curriculum**: Generate and review training tasks
|
|
496
|
+
|
|
497
|
+
Frontend dev server (local development only):
|
|
498
|
+
|
|
499
|
+
```bash
|
|
500
|
+
cd web
|
|
501
|
+
npm install
|
|
502
|
+
npm run dev
|
|
503
|
+
```
|
|
504
|
+
|
|
505
|
+
### Embeddings and Semantic Search
|
|
506
|
+
|
|
507
|
+
Semantic search is used in these places:
|
|
508
|
+
- **API:** `GET /api/v1/traces/search` for vector similarity over traces
|
|
509
|
+
- **Experience Retrieval:** `search_similar_traces` and `search_past_experiences` agent tools
|
|
510
|
+
- **AI Librarian:** uses semantic search to surface relevant past traces when enabled
|
|
511
|
+
|
|
512
|
+
Configure embeddings for vector search and experience retrieval:
|
|
513
|
+
|
|
514
|
+
```bash
|
|
515
|
+
# local (default)
|
|
516
|
+
EMBEDDING_PROVIDER=local
|
|
517
|
+
EMBEDDING_MODEL=all-MiniLM-L6-v2
|
|
518
|
+
|
|
519
|
+
# cloud (OpenAI/Gemini)
|
|
520
|
+
EMBEDDING_PROVIDER=openai
|
|
521
|
+
EMBEDDING_API_KEY=your-key
|
|
522
|
+
EMBEDDING_MODEL=text-embedding-3-small
|
|
523
|
+
|
|
524
|
+
# optional for OpenAI-compatible endpoints
|
|
525
|
+
EMBEDDING_BASE_URL=https://your-endpoint/v1
|
|
526
|
+
```
|
|
527
|
+
|
|
528
|
+
**When embeddings run:** embeddings are created at trace ingest time, not during server startup.
|
|
529
|
+
|
|
530
|
+
**Do I need local embeddings?** No. You can skip `embeddings-local` entirely and still run the platform. If no embedding provider is configured, traces still ingest and all non-semantic features work normally; only vector search (and features that rely on it) are unavailable.
|
|
531
|
+
|
|
532
|
+
## 🔌 Integration with Your Agent
|
|
533
|
+
|
|
534
|
+
### Using the TraceStore Client (read/query)
|
|
535
|
+
|
|
536
|
+
This section focuses on read/query operations. For logging traces, see the
|
|
537
|
+
`trace_scope` section below.
|
|
538
|
+
|
|
539
|
+
```python
|
|
540
|
+
import json
|
|
541
|
+
|
|
542
|
+
from tracebrain.sdk.client import TraceClient, TraceScope
|
|
543
|
+
|
|
544
|
+
client = TraceClient(base_url="http://localhost:8000")
|
|
545
|
+
|
|
546
|
+
# Query traces
|
|
547
|
+
traces = client.list_traces()
|
|
548
|
+
|
|
549
|
+
# Export traces as JSONL
|
|
550
|
+
jsonl_payload = client.export_traces(min_rating=4, limit=100)
|
|
551
|
+
|
|
552
|
+
# Parse JSONL into Python objects
|
|
553
|
+
trace_items = [json.loads(line) for line in jsonl_payload.splitlines() if line.strip()]
|
|
554
|
+
|
|
555
|
+
# Reconstruct messages or turns from OTLP
|
|
556
|
+
trace_data = client.get_trace("my-trace-001")
|
|
557
|
+
|
|
558
|
+
# to_messages: rebuilds chat message list (role/content) from spans
|
|
559
|
+
messages = TraceScope.to_messages(trace_data)
|
|
560
|
+
# Example: messages[:2] -> [{"role": "user", "content": "..."}, {"role": "assistant", "content": "..."}]
|
|
561
|
+
|
|
562
|
+
# to_turns: groups messages into conversation turns for UI/analysis
|
|
563
|
+
turns = TraceScope.to_turns(trace_data)
|
|
564
|
+
# Example: turns[0] -> {"user": "...", "assistant": "..."}
|
|
565
|
+
|
|
566
|
+
# to_tracebrain_turns: returns TraceBrain-native turn objects with metadata
|
|
567
|
+
tracebrain_turns = TraceScope.to_tracebrain_turns(trace_data)
|
|
568
|
+
# Example: tracebrain_turns[0] -> {"turn_id": "...", "messages": [...], "span_ids": [...]}
|
|
569
|
+
```
|
|
570
|
+
|
|
571
|
+
### Trace Init and trace_scope (recommended for all runs)
|
|
572
|
+
|
|
573
|
+
Use `trace_scope` for every agent run you plan to log. It pre-registers a trace
|
|
574
|
+
via `/api/v1/traces/init`, sets the trace ID in a context-local store (safe for
|
|
575
|
+
async and multi-thread usage), and uploads the trace when the scope exits. This
|
|
576
|
+
is required if your agent might call `request_human_intervention` (Active Help
|
|
577
|
+
Request) so the help signal is attached to the correct trace.
|
|
578
|
+
|
|
579
|
+
**Recommended: use `trace_scope` (auto init + auto log)**
|
|
580
|
+
|
|
581
|
+
```python
|
|
582
|
+
from tracebrain import TraceClient
|
|
583
|
+
from my_converters import convert_smolagent_to_otlp
|
|
584
|
+
|
|
585
|
+
client = TraceClient(base_url="http://localhost:8000")
|
|
586
|
+
|
|
587
|
+
with client.trace_scope(system_prompt="You are a helpful assistant") as trace:
|
|
588
|
+
agent = MyAgent(system_prompt="You are a helpful assistant")
|
|
589
|
+
agent.run("Summarize this report")
|
|
590
|
+
|
|
591
|
+
otlp_trace = convert_smolagent_to_otlp(agent)
|
|
592
|
+
trace["spans"] = otlp_trace.get("spans", [])
|
|
593
|
+
```
|
|
594
|
+
|
|
595
|
+
**Advanced: manual trace ID + manual log**
|
|
596
|
+
|
|
597
|
+
```python
|
|
598
|
+
from tracebrain import TraceClient
|
|
599
|
+
from tracebrain.sdk.trace_context import set_trace_id, get_trace_id
|
|
600
|
+
from my_converters import convert_smolagent_to_otlp
|
|
601
|
+
|
|
602
|
+
client = TraceClient(base_url="http://localhost:8000")
|
|
603
|
+
set_trace_id("trace_123")
|
|
604
|
+
|
|
605
|
+
agent = MyAgent(system_prompt="You are a helpful assistant")
|
|
606
|
+
agent.run("Summarize this report")
|
|
607
|
+
|
|
608
|
+
otlp_trace = convert_smolagent_to_otlp(agent)
|
|
609
|
+
otlp_trace["trace_id"] = get_trace_id() or "trace_123"
|
|
610
|
+
client.log_trace(otlp_trace)
|
|
611
|
+
```
|
|
612
|
+
|
|
613
|
+
### Agent Tools (Experience Retrieval + Active Help Request)
|
|
614
|
+
|
|
615
|
+
When to use:
|
|
616
|
+
|
|
617
|
+
- Use `search_past_experiences` to fetch high-quality, previously successful traces for similar tasks.
|
|
618
|
+
- Use `search_similar_traces` when you need semantic similarity over trace content.
|
|
619
|
+
- Use `request_human_intervention` when the agent is blocked, uncertain, or needs clarification.
|
|
620
|
+
|
|
621
|
+
```python
|
|
622
|
+
from tracebrain.sdk import (
|
|
623
|
+
search_past_experiences,
|
|
624
|
+
search_similar_traces,
|
|
625
|
+
request_human_intervention,
|
|
626
|
+
)
|
|
627
|
+
|
|
628
|
+
# Retrieve prior successful experiences
|
|
629
|
+
experiences = search_past_experiences("resolve a tool error", min_rating=4, limit=3)
|
|
630
|
+
|
|
631
|
+
# Semantic search over traces
|
|
632
|
+
similar = search_similar_traces("multi-step planning", min_rating=4, limit=3)
|
|
633
|
+
|
|
634
|
+
# Escalate to human when the agent is blocked
|
|
635
|
+
help_request = request_human_intervention("User request is ambiguous, need clarification")
|
|
636
|
+
```
|
|
637
|
+
|
|
638
|
+
### Building a Custom Converter
|
|
639
|
+
|
|
640
|
+
TraceBrain uses the **TraceBrain OTLP (OpenTelemetry Protocol) format** - a delta-based trace schema with parent_id chains for conversation reconstruction.
|
|
641
|
+
|
|
642
|
+
See [docs/Converter.md](docs/Converter.md) for:
|
|
643
|
+
- OTLP schema explanation (parent_id, new_content, delta-based design)
|
|
644
|
+
- Step-by-step conversion recipe
|
|
645
|
+
- Python template code with examples
|
|
646
|
+
|
|
647
|
+
**Quick Example:**
|
|
648
|
+
|
|
649
|
+
```python
|
|
650
|
+
import uuid
|
|
651
|
+
|
|
652
|
+
from tracebrain.core.schema import TraceBrainAttributes, SpanType
|
|
653
|
+
|
|
654
|
+
def convert_my_agent_to_otlp(agent_data):
|
|
655
|
+
spans = []
|
|
656
|
+
parent_id = None
|
|
657
|
+
for step in agent_data.steps:
|
|
658
|
+
spans.append({
|
|
659
|
+
"span_id": str(uuid.uuid4()),
|
|
660
|
+
"parent_id": parent_id, # Chain spans together
|
|
661
|
+
"name": step.action,
|
|
662
|
+
"attributes": {
|
|
663
|
+
TraceBrainAttributes.SPAN_TYPE: SpanType.LLM_INFERENCE,
|
|
664
|
+
TraceBrainAttributes.LLM_NEW_CONTENT: step.output, # Delta content only
|
|
665
|
+
TraceBrainAttributes.TOOL_NAME: step.tool_name,
|
|
666
|
+
}
|
|
667
|
+
})
|
|
668
|
+
parent_id = spans[-1]["span_id"]
|
|
669
|
+
return {"trace_id": agent_data.id, "spans": spans}
|
|
670
|
+
```
|
|
671
|
+
|
|
672
|
+
## 📁 Project Structure
|
|
673
|
+
|
|
674
|
+
```
|
|
675
|
+
TraceBrain/
|
|
676
|
+
├── src/
|
|
677
|
+
│ ├── tracebrain/ # Core package logic
|
|
678
|
+
│ │ ├── api/v1/ # FastAPI REST endpoints
|
|
679
|
+
│ │ ├── core/ # TraceStore, schema, agent logic
|
|
680
|
+
│ │ ├── db/ # Database session management
|
|
681
|
+
│ │ ├── resources/ # Bundled Docker + sample data
|
|
682
|
+
│ │ ├── static/ # Bundled React build artifacts
|
|
683
|
+
│ │ ├── sdk/ # Client SDK
|
|
684
|
+
│ │ ├── cli.py # CLI commands
|
|
685
|
+
│ │ └── main.py # FastAPI app entry
|
|
686
|
+
├── docs/ # Documentation
|
|
687
|
+
├── web/ # React source code (contributors)
|
|
688
|
+
├── pyproject.toml # Project metadata
|
|
689
|
+
└── README.md
|
|
690
|
+
```
|
|
691
|
+
|
|
692
|
+
## 🛠️ Development
|
|
693
|
+
|
|
694
|
+
### Running Tests
|
|
695
|
+
|
|
696
|
+
No automated test suite is included yet.
|
|
697
|
+
|
|
698
|
+
### Seeding Sample Data
|
|
699
|
+
|
|
700
|
+
```bash
|
|
701
|
+
tracebrain seed
|
|
702
|
+
```
|
|
703
|
+
|
|
704
|
+
### Database Migrations
|
|
705
|
+
|
|
706
|
+
No migration tooling is included yet. For schema changes:
|
|
707
|
+
|
|
708
|
+
1. Update models in `src/tracebrain/db/base.py`
|
|
709
|
+
2. Recreate the database:
|
|
710
|
+
- **SQLite (local):** delete `tracebrain_traces.db`, then run `tracebrain init-db`
|
|
711
|
+
- **PostgreSQL (Docker):** `docker compose -f docker/docker-compose.yml down -v` then `tracebrain up`
|
|
712
|
+
|
|
713
|
+
### Working with JSONB Queries (PostgreSQL)
|
|
714
|
+
|
|
715
|
+
When querying JSONB fields:
|
|
716
|
+
|
|
717
|
+
```python
|
|
718
|
+
from sqlalchemy import func, cast
|
|
719
|
+
from sqlalchemy.dialects.postgresql import JSONB
|
|
720
|
+
|
|
721
|
+
# Extract text from JSONB
|
|
722
|
+
span_type = func.jsonb_extract_path_text(Span.attributes, "tracebrain.span.type")
|
|
723
|
+
|
|
724
|
+
# Cast for complex queries
|
|
725
|
+
rating = func.jsonb_extract_path_text(cast(Trace.feedback, JSONB), "rating")
|
|
726
|
+
```
|
|
727
|
+
|
|
728
|
+
## 📚 Documentation
|
|
729
|
+
|
|
730
|
+
- **[Building Your Own Trace Converter](docs/Converter.md)** - Complete guide for integrating custom agent frameworks
|
|
731
|
+
- **[LLM Provider Guide](docs/LLMProviders.md)** - Use TraceBrain LLM providers and attach usage metadata
|
|
732
|
+
- **[Trace Reconstruction Guide](docs/Reconstructor.md)** - Rebuild full context from delta traces for training
|
|
733
|
+
- **[Sample OTLP Traces](data/TraceBrain%20OTLP%20Trace%20Samples)** - Example trace files
|
|
734
|
+
- **[API Documentation](http://localhost:8000/docs)** - Interactive OpenAPI docs (when server is running)
|
|
735
|
+
- **[Docker Setup Guide](docker/README.md)** - Docker-specific instructions
|
|
736
|
+
|
|
737
|
+
## 🤝 Contributing
|
|
738
|
+
|
|
739
|
+
Contributions are welcome! Here's how to get started:
|
|
740
|
+
|
|
741
|
+
1. Fork the repository
|
|
742
|
+
2. Create a feature branch: `git checkout -b feature/amazing-feature`
|
|
743
|
+
3. Make your changes and test thoroughly
|
|
744
|
+
4. Commit with clear messages: `git commit -m 'Add amazing feature'`
|
|
745
|
+
5. Push to your fork: `git push origin feature/amazing-feature`
|
|
746
|
+
6. Open a Pull Request
|
|
747
|
+
|
|
748
|
+
**Development Guidelines:**
|
|
749
|
+
- Follow PEP 8 style guide
|
|
750
|
+
- Add tests for new features
|
|
751
|
+
- Update documentation as needed
|
|
752
|
+
- Ensure Docker builds pass
|
|
753
|
+
|
|
754
|
+
## 🐛 Troubleshooting
|
|
755
|
+
|
|
756
|
+
### Docker changes not reflected
|
|
757
|
+
|
|
758
|
+
If code changes aren't picked up after `tracebrain up --build`:
|
|
759
|
+
|
|
760
|
+
```bash
|
|
761
|
+
tracebrain down
|
|
762
|
+
docker compose -f docker/docker-compose.yml build --no-cache
|
|
763
|
+
tracebrain up
|
|
764
|
+
```
|
|
765
|
+
|
|
766
|
+
### PostgreSQL connection errors
|
|
767
|
+
|
|
768
|
+
Ensure PostgreSQL is running and check the connection string in `src/tracebrain/config.py`:
|
|
769
|
+
|
|
770
|
+
```python
|
|
771
|
+
DATABASE_URL = "postgresql://traceuser:tracepass@localhost:5432/tracedb"
|
|
772
|
+
```
|
|
773
|
+
|
|
774
|
+
### Tool usage analytics showing incorrect data
|
|
775
|
+
|
|
776
|
+
After updating `store.py`, rebuild Docker containers to apply JSONB query fixes.
|
|
777
|
+
|
|
778
|
+
## 📄 License
|
|
779
|
+
|
|
780
|
+
This project is licensed under the MIT License - see the LICENSE file for details.
|
|
781
|
+
|
|
782
|
+
## 🙏 Acknowledgments
|
|
783
|
+
|
|
784
|
+
- Built with [FastAPI](https://fastapi.tiangolo.com/)
|
|
785
|
+
- Database powered by [SQLAlchemy](https://www.sqlalchemy.org/)
|
|
786
|
+
- UI with [React (Vite)](https://vitejs.dev/) + [MUI](https://mui.com/)
|
|
787
|
+
- Inspired by [OpenTelemetry](https://opentelemetry.io/) standards
|
|
788
|
+
|
|
789
|
+
---
|
|
790
|
+
|
|
791
|
+
**Made with ❤️ for the AI agent community**
|
|
792
|
+
|
|
793
|
+
For questions or support, please open an issue on GitHub.
|