flock-core 0.5.11 → 0.5.20 (py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
This release has been flagged as potentially problematic; see the registry's advisory on flock-core for details.
- flock/__init__.py +1 -1
- flock/agent/__init__.py +30 -0
- flock/agent/builder_helpers.py +192 -0
- flock/agent/builder_validator.py +169 -0
- flock/agent/component_lifecycle.py +325 -0
- flock/agent/context_resolver.py +141 -0
- flock/agent/mcp_integration.py +212 -0
- flock/agent/output_processor.py +304 -0
- flock/api/__init__.py +20 -0
- flock/{api_models.py → api/models.py} +0 -2
- flock/{service.py → api/service.py} +3 -3
- flock/cli.py +2 -2
- flock/components/__init__.py +41 -0
- flock/components/agent/__init__.py +22 -0
- flock/{components.py → components/agent/base.py} +4 -3
- flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
- flock/components/orchestrator/__init__.py +22 -0
- flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
- flock/components/orchestrator/circuit_breaker.py +95 -0
- flock/components/orchestrator/collection.py +143 -0
- flock/components/orchestrator/deduplication.py +78 -0
- flock/core/__init__.py +30 -0
- flock/core/agent.py +953 -0
- flock/{artifacts.py → core/artifacts.py} +1 -1
- flock/{context_provider.py → core/context_provider.py} +3 -3
- flock/core/orchestrator.py +1102 -0
- flock/{store.py → core/store.py} +99 -454
- flock/{subscription.py → core/subscription.py} +1 -1
- flock/dashboard/collector.py +5 -5
- flock/dashboard/graph_builder.py +7 -7
- flock/dashboard/routes/__init__.py +21 -0
- flock/dashboard/routes/control.py +327 -0
- flock/dashboard/routes/helpers.py +340 -0
- flock/dashboard/routes/themes.py +76 -0
- flock/dashboard/routes/traces.py +521 -0
- flock/dashboard/routes/websocket.py +108 -0
- flock/dashboard/service.py +43 -1316
- flock/engines/dspy/__init__.py +20 -0
- flock/engines/dspy/artifact_materializer.py +216 -0
- flock/engines/dspy/signature_builder.py +474 -0
- flock/engines/dspy/streaming_executor.py +858 -0
- flock/engines/dspy_engine.py +45 -1330
- flock/engines/examples/simple_batch_engine.py +2 -2
- flock/examples.py +7 -7
- flock/logging/logging.py +1 -16
- flock/models/__init__.py +10 -0
- flock/orchestrator/__init__.py +45 -0
- flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
- flock/orchestrator/artifact_manager.py +168 -0
- flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
- flock/orchestrator/component_runner.py +389 -0
- flock/orchestrator/context_builder.py +167 -0
- flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
- flock/orchestrator/event_emitter.py +167 -0
- flock/orchestrator/initialization.py +184 -0
- flock/orchestrator/lifecycle_manager.py +226 -0
- flock/orchestrator/mcp_manager.py +202 -0
- flock/orchestrator/scheduler.py +189 -0
- flock/orchestrator/server_manager.py +234 -0
- flock/orchestrator/tracing.py +147 -0
- flock/storage/__init__.py +10 -0
- flock/storage/artifact_aggregator.py +158 -0
- flock/storage/in_memory/__init__.py +6 -0
- flock/storage/in_memory/artifact_filter.py +114 -0
- flock/storage/in_memory/history_aggregator.py +115 -0
- flock/storage/sqlite/__init__.py +10 -0
- flock/storage/sqlite/agent_history_queries.py +154 -0
- flock/storage/sqlite/consumption_loader.py +100 -0
- flock/storage/sqlite/query_builder.py +112 -0
- flock/storage/sqlite/query_params_builder.py +91 -0
- flock/storage/sqlite/schema_manager.py +168 -0
- flock/storage/sqlite/summary_queries.py +194 -0
- flock/utils/__init__.py +14 -0
- flock/utils/async_utils.py +67 -0
- flock/{runtime.py → utils/runtime.py} +3 -3
- flock/utils/time_utils.py +53 -0
- flock/utils/type_resolution.py +38 -0
- flock/{utilities.py → utils/utilities.py} +2 -2
- flock/utils/validation.py +57 -0
- flock/utils/visibility.py +79 -0
- flock/utils/visibility_utils.py +134 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/METADATA +18 -4
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -33
- flock/agent.py +0 -1578
- flock/orchestrator.py +0 -1983
- flock/{visibility.py → core/visibility.py} +0 -0
- flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
- flock/{helper → utils}/cli_helper.py +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
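Most of this release is an internal restructuring: the large flock/agent.py, flock/orchestrator.py, flock/store.py, flock/dashboard/service.py, and flock/engines/dspy_engine.py modules are split into flock/agent/, flock/orchestrator/, flock/core/, flock/components/, flock/storage/, flock/engines/dspy/, and flock/dashboard/routes/ subpackages. The sketch below shows what the rename map implies for imports; only Flock and FilterConfig are confirmed by the new route code further down, and whether 0.5.20 re-exports names from their old locations is not visible in this diff.

# Inferred from the rename map above; not verified against 0.5.20's __init__ re-exports.
from flock.core import Flock               # core orchestrator now lives under flock/core/
from flock.core.store import FilterConfig  # flock/store.py → flock/core/store.py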
--- /dev/null
+++ b/flock/dashboard/routes/traces.py
@@ -0,0 +1,521 @@
+"""Trace-related API routes for dashboard."""
+
+import json
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+import duckdb
+from fastapi import FastAPI, HTTPException
+
+from flock.core import Flock
+from flock.core.store import FilterConfig
+from flock.dashboard.collector import DashboardEventCollector
+from flock.dashboard.websocket import WebSocketManager
+from flock.logging.logging import get_logger
+
+
+logger = get_logger("dashboard.routes.traces")
+
+
+def register_trace_routes(
+    app: FastAPI,
+    orchestrator: Flock,
+    websocket_manager: WebSocketManager,
+    event_collector: DashboardEventCollector,
+) -> None:
+    """Register trace-related API endpoints.
+
+    Args:
+        app: FastAPI application instance
+        orchestrator: Flock orchestrator instance
+        websocket_manager: WebSocket manager for real-time updates
+        event_collector: Dashboard event collector
+    """
+
+    @app.get("/api/traces")
+    async def get_traces() -> list[dict[str, Any]]:
+        """Get OpenTelemetry traces from DuckDB.
+
+        Returns list of trace spans in OTEL format.
+
+        Returns:
+            [
+                {
+                    "name": "Agent.execute",
+                    "context": {
+                        "trace_id": "...",
+                        "span_id": "...",
+                        ...
+                    },
+                    "start_time": 1234567890,
+                    "end_time": 1234567891,
+                    "attributes": {...},
+                    "status": {...}
+                },
+                ...
+            ]
+        """
+        db_path = Path(".flock/traces.duckdb")
+
+        if not db_path.exists():
+            logger.warning(
+                "Trace database not found. Make sure FLOCK_AUTO_TRACE=true FLOCK_TRACE_FILE=true"
+            )
+            return []
+
+        try:
+            with duckdb.connect(str(db_path), read_only=True) as conn:
+                # Query all spans from DuckDB
+                result = conn.execute("""
+                    SELECT
+                        trace_id, span_id, parent_id, name, service, operation,
+                        kind, start_time, end_time, duration_ms,
+                        status_code, status_description,
+                        attributes, events, links, resource
+                    FROM spans
+                    ORDER BY start_time DESC
+                """).fetchall()
+
+                spans = []
+                for row in result:
+                    # Reconstruct OTEL span format from DuckDB row
+                    span = {
+                        "name": row[3],  # name
+                        "context": {
+                            "trace_id": row[0],  # trace_id
+                            "span_id": row[1],  # span_id
+                            "trace_flags": 0,
+                            "trace_state": "",
+                        },
+                        "kind": row[6],  # kind
+                        "start_time": row[7],  # start_time
+                        "end_time": row[8],  # end_time
+                        "status": {
+                            "status_code": row[10],  # status_code
+                            "description": row[11],  # status_description
+                        },
+                        "attributes": json.loads(row[12])
+                        if row[12]
+                        else {},  # attributes
+                        "events": json.loads(row[13]) if row[13] else [],  # events
+                        "links": json.loads(row[14]) if row[14] else [],  # links
+                        "resource": json.loads(row[15]) if row[15] else {},  # resource
+                    }
+
+                    # Add parent_id if exists
+                    if row[2]:  # parent_id
+                        span["parent_id"] = row[2]
+
+                    spans.append(span)
+
+                logger.debug(f"Loaded {len(spans)} spans from DuckDB")
+                return spans
+
+        except Exception as e:
+            logger.exception(f"Error reading traces from DuckDB: {e}")
+            return []
+
+    @app.get("/api/traces/services")
+    async def get_trace_services() -> dict[str, Any]:
+        """Get list of unique services that have been traced.
+
+        Returns:
+            {
+                "services": ["Flock", "Agent", "DSPyEngine", ...],
+                "operations": ["Flock.publish", "Agent.execute", ...]
+            }
+        """
+        db_path = Path(".flock/traces.duckdb")
+
+        if not db_path.exists():
+            return {"services": [], "operations": []}
+
+        try:
+            with duckdb.connect(str(db_path), read_only=True) as conn:
+                # Get unique services
+                services_result = conn.execute("""
+                    SELECT DISTINCT service
+                    FROM spans
+                    WHERE service IS NOT NULL
+                    ORDER BY service
+                """).fetchall()
+
+                # Get unique operations
+                operations_result = conn.execute("""
+                    SELECT DISTINCT name
+                    FROM spans
+                    WHERE name IS NOT NULL
+                    ORDER BY name
+                """).fetchall()
+
+                return {
+                    "services": [row[0] for row in services_result],
+                    "operations": [row[0] for row in operations_result],
+                }
+
+        except Exception as e:
+            logger.exception(f"Error reading trace services: {e}")
+            return {"services": [], "operations": []}
+
+    @app.post("/api/traces/clear")
+    async def clear_traces() -> dict[str, Any]:
+        """Clear all traces from DuckDB database.
+
+        Returns:
+            {
+                "success": true,
+                "deleted_count": 123,
+                "error": null
+            }
+        """
+        result = Flock.clear_traces()
+        if result["success"]:
+            logger.info(f"Cleared {result['deleted_count']} trace spans via API")
+        else:
+            logger.error(f"Failed to clear traces: {result['error']}")
+
+        return result
+
+    @app.post("/api/traces/query")
+    async def execute_trace_query(request: dict[str, Any]) -> dict[str, Any]:
+        """Execute a DuckDB SQL query on the traces database.
+
+        Security: Only SELECT queries allowed, rate-limited.
+        """
+        query = request.get("query", "").strip()
+
+        if not query:
+            return {"error": "Query cannot be empty", "results": [], "columns": []}
+
+        # Security: Only allow SELECT queries
+        query_upper = query.upper().strip()
+        if not query_upper.startswith("SELECT"):
+            return {
+                "error": "Only SELECT queries are allowed",
+                "results": [],
+                "columns": [],
+            }
+
+        # Check for dangerous keywords
+        dangerous = [
+            "DROP",
+            "DELETE",
+            "INSERT",
+            "UPDATE",
+            "ALTER",
+            "CREATE",
+            "TRUNCATE",
+        ]
+        if any(keyword in query_upper for keyword in dangerous):
+            return {
+                "error": "Query contains forbidden operations",
+                "results": [],
+                "columns": [],
+            }
+
+        db_path = Path(".flock/traces.duckdb")
+        if not db_path.exists():
+            return {
+                "error": "Trace database not found",
+                "results": [],
+                "columns": [],
+            }
+
+        try:
+            with duckdb.connect(str(db_path), read_only=True) as conn:
+                result = conn.execute(query).fetchall()
+                columns = (
+                    [desc[0] for desc in conn.description] if conn.description else []
+                )
+
+                # Convert to JSON-serializable format
+                results = []
+                for row in result:
+                    row_dict = {}
+                    for i, col in enumerate(columns):
+                        val = row[i]
+                        # Convert bytes to string, handle other types
+                        if isinstance(val, bytes):
+                            row_dict[col] = val.decode("utf-8")
+                        else:
+                            row_dict[col] = val
+                    results.append(row_dict)
+
+                return {
+                    "results": results,
+                    "columns": columns,
+                    "row_count": len(results),
+                }
+        except Exception as e:
+            logger.exception(f"DuckDB query error: {e}")
+            return {"error": str(e), "results": [], "columns": []}
+
+    @app.get("/api/traces/stats")
+    async def get_trace_stats() -> dict[str, Any]:
+        """Get statistics about the trace database.
+
+        Returns:
+            {
+                "total_spans": 123,
+                "total_traces": 45,
+                "services_count": 5,
+                "oldest_trace": "2025-10-07T12:00:00Z",
+                "newest_trace": "2025-10-07T14:30:00Z",
+                "database_size_mb": 12.5
+            }
+        """
+        db_path = Path(".flock/traces.duckdb")
+
+        if not db_path.exists():
+            return {
+                "total_spans": 0,
+                "total_traces": 0,
+                "services_count": 0,
+                "oldest_trace": None,
+                "newest_trace": None,
+                "database_size_mb": 0,
+            }
+
+        try:
+            with duckdb.connect(str(db_path), read_only=True) as conn:
+                # Get total spans
+                total_spans = conn.execute("SELECT COUNT(*) FROM spans").fetchone()[0]
+
+                # Get total unique traces
+                total_traces = conn.execute(
+                    "SELECT COUNT(DISTINCT trace_id) FROM spans"
+                ).fetchone()[0]
+
+                # Get services count
+                services_count = conn.execute(
+                    "SELECT COUNT(DISTINCT service) FROM spans WHERE service IS NOT NULL"
+                ).fetchone()[0]
+
+                # Get time range
+                time_range = conn.execute("""
+                    SELECT
+                        MIN(start_time) as oldest,
+                        MAX(start_time) as newest
+                    FROM spans
+                """).fetchone()
+
+                oldest_trace = None
+                newest_trace = None
+                if time_range and time_range[0]:
+                    # Convert nanoseconds to datetime
+                    oldest_trace = datetime.fromtimestamp(
+                        time_range[0] / 1_000_000_000, tz=UTC
+                    ).isoformat()
+                    newest_trace = datetime.fromtimestamp(
+                        time_range[1] / 1_000_000_000, tz=UTC
+                    ).isoformat()
+
+                # Get file size
+                size_mb = db_path.stat().st_size / (1024 * 1024)
+
+                return {
+                    "total_spans": total_spans,
+                    "total_traces": total_traces,
+                    "services_count": services_count,
+                    "oldest_trace": oldest_trace,
+                    "newest_trace": newest_trace,
+                    "database_size_mb": round(size_mb, 2),
+                }
+
+        except Exception as e:
+            logger.exception(f"Error reading trace stats: {e}")
+            return {
+                "total_spans": 0,
+                "total_traces": 0,
+                "services_count": 0,
+                "oldest_trace": None,
+                "newest_trace": None,
+                "database_size_mb": 0,
+            }
+
+    @app.get("/api/streaming-history/{agent_name}")
+    async def get_streaming_history(agent_name: str) -> dict[str, Any]:
+        """Get historical streaming output for a specific agent.
+
+        Args:
+            agent_name: Name of the agent to get streaming history for
+
+        Returns:
+            {
+                "agent_name": "agent_name",
+                "events": [
+                    {
+                        "correlation_id": "...",
+                        "timestamp": "...",
+                        "agent_name": "...",
+                        "run_id": "...",
+                        "output_type": "llm_token",
+                        "content": "...",
+                        "sequence": 0,
+                        "is_final": false
+                    },
+                    ...
+                ]
+            }
+        """
+        try:
+            history = websocket_manager.get_streaming_history(agent_name)
+            return {
+                "agent_name": agent_name,
+                "events": [event.model_dump() for event in history],
+            }
+        except Exception as e:
+            logger.exception(f"Failed to get streaming history for {agent_name}: {e}")
+            raise HTTPException(
+                status_code=500, detail=f"Failed to get streaming history: {e!s}"
+            )
+
+    @app.get("/api/artifacts/history/{node_id}")
+    async def get_message_history(node_id: str) -> dict[str, Any]:
+        """Get complete message history for a node (both produced and consumed).
+
+        Phase 4.1 Feature Gap Fix: Returns both messages produced by AND consumed by
+        the specified node, enabling complete message history view in MessageHistoryTab.
+
+        Args:
+            node_id: ID of the node (agent name or message ID)
+
+        Returns:
+            {
+                "node_id": "agent_name",
+                "messages": [
+                    {
+                        "id": "artifact-uuid",
+                        "type": "ArtifactType",
+                        "direction": "published"|"consumed",
+                        "payload": {...},
+                        "timestamp": "2025-10-11T...",
+                        "correlation_id": "uuid",
+                        "produced_by": "producer_name",
+                        "consumed_at": "2025-10-11T..." (only for consumed)
+                    },
+                    ...
+                ],
+                "total": 123
+            }
+        """
+        try:
+            messages = []
+
+            # 1. Get messages PRODUCED by this node
+            produced_filter = FilterConfig(produced_by={node_id})
+            (
+                produced_artifacts,
+                _produced_count,
+            ) = await orchestrator.store.query_artifacts(
+                produced_filter, limit=100, offset=0, embed_meta=False
+            )
+
+            messages.extend([
+                {
+                    "id": str(artifact.id),
+                    "type": artifact.type,
+                    "direction": "published",
+                    "payload": artifact.payload,
+                    "timestamp": artifact.created_at.isoformat(),
+                    "correlation_id": str(artifact.correlation_id)
+                    if artifact.correlation_id
+                    else None,
+                    "produced_by": artifact.produced_by,
+                }
+                for artifact in produced_artifacts
+            ])
+
+            # 2. Get messages CONSUMED by this node
+            # Query all artifacts with consumption metadata
+            all_artifacts_filter = FilterConfig()  # No filter = all artifacts
+            all_envelopes, _ = await orchestrator.store.query_artifacts(
+                all_artifacts_filter, limit=500, offset=0, embed_meta=True
+            )
+
+            messages.extend([
+                {
+                    "id": str(envelope.artifact.id),
+                    "type": envelope.artifact.type,
+                    "direction": "consumed",
+                    "payload": envelope.artifact.payload,
+                    "timestamp": envelope.artifact.created_at.isoformat(),
+                    "correlation_id": str(envelope.artifact.correlation_id)
+                    if envelope.artifact.correlation_id
+                    else None,
+                    "produced_by": envelope.artifact.produced_by,
+                    "consumed_at": consumption.consumed_at.isoformat(),
+                }
+                for envelope in all_envelopes
+                for consumption in envelope.consumptions
+                if consumption.consumer == node_id
+            ])
+
+            # Sort by timestamp (most recent first)
+            messages.sort(
+                key=lambda m: m.get("consumed_at", m["timestamp"]), reverse=True
+            )
+
+            return {
+                "node_id": node_id,
+                "messages": messages,
+                "total": len(messages),
+            }
+
+        except Exception as e:
+            logger.exception(f"Failed to get message history for {node_id}: {e}")
+            raise HTTPException(
+                status_code=500, detail=f"Failed to get message history: {e!s}"
+            )
+
+    @app.get("/api/agents/{agent_id}/runs")
+    async def get_agent_runs(agent_id: str) -> dict[str, Any]:
+        """Get run history for an agent.
+
+        Phase 4.1 Feature Gap Fix: Returns agent execution history with metrics
+        for display in RunStatusTab.
+
+        Args:
+            agent_id: ID of the agent
+
+        Returns:
+            {
+                "agent_id": "agent_name",
+                "runs": [
+                    {
+                        "run_id": "uuid",
+                        "start_time": "2025-10-11T...",
+                        "end_time": "2025-10-11T...",
+                        "duration_ms": 1234,
+                        "status": "completed"|"active"|"error",
+                        "metrics": {
+                            "tokens_used": 123,
+                            "cost_usd": 0.0012,
+                            "artifacts_produced": 5
+                        },
+                        "error_message": "error details" (if status=error)
+                    },
+                    ...
+                ],
+                "total": 50
+            }
+        """
+        try:
+            # TODO: Implement run history tracking in orchestrator
+            # For now, return empty array with proper structure
+            # This unblocks frontend development and can be enhanced later
+
+            runs = []
+
+            # FUTURE: Query run history from orchestrator or store
+            # Example implementation when run tracking is added:
+            # runs = await orchestrator.get_agent_run_history(agent_id, limit=50)
+
+            return {"agent_id": agent_id, "runs": runs, "total": len(runs)}
+
+        except Exception as e:
+            logger.exception(f"Failed to get run history for {agent_id}: {e}")
+            raise HTTPException(
+                status_code=500, detail=f"Failed to get run history: {e!s}"
+            )
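For orientation, here is a minimal sketch of exercising the new trace endpoints from a client. It assumes a running dashboard service; the host/port and the use of httpx are illustrative, not part of this package.

import httpx

BASE = "http://localhost:8344"  # assumed dashboard address; adjust to your deployment

# List all recorded spans as OTEL-style dicts (GET /api/traces)
spans = httpx.get(f"{BASE}/api/traces").json()

# Database summary: span/trace counts, time range, file size (GET /api/traces/stats)
stats = httpx.get(f"{BASE}/api/traces/stats").json()

# Read-only SQL over the spans table (POST /api/traces/query).
# Only SELECT statements pass the endpoint's keyword filter.
rows = httpx.post(
    f"{BASE}/api/traces/query",
    json={"query": "SELECT name, COUNT(*) AS n FROM spans GROUP BY name ORDER BY n DESC"},
).json()
print(rows["row_count"], rows["columns"])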
--- /dev/null
+++ b/flock/dashboard/routes/websocket.py
@@ -0,0 +1,108 @@
+"""WebSocket and real-time dashboard routes."""
+
+from pathlib import Path
+from typing import TYPE_CHECKING
+
+from fastapi import FastAPI, WebSocket, WebSocketDisconnect
+from fastapi.staticfiles import StaticFiles
+
+from flock.dashboard.collector import DashboardEventCollector
+from flock.dashboard.graph_builder import GraphAssembler
+from flock.dashboard.models.graph import GraphRequest, GraphSnapshot
+from flock.dashboard.websocket import WebSocketManager
+from flock.logging.logging import get_logger
+
+
+if TYPE_CHECKING:
+    from flock.core import Flock
+
+logger = get_logger("dashboard.routes.websocket")
+
+
+def register_websocket_routes(
+    app: FastAPI,
+    orchestrator: "Flock",
+    websocket_manager: WebSocketManager,
+    event_collector: DashboardEventCollector,
+    graph_assembler: GraphAssembler | None,
+    use_v2: bool = False,
+) -> None:
+    """Register WebSocket endpoint and static file serving.
+
+    Args:
+        app: FastAPI application instance
+        orchestrator: Flock orchestrator instance
+        websocket_manager: WebSocket manager for real-time updates
+        event_collector: Dashboard event collector
+        graph_assembler: Graph assembler for dashboard snapshots
+        use_v2: Whether to use v2 dashboard frontend
+    """
+
+    @app.websocket("/ws")
+    async def websocket_endpoint(websocket: WebSocket) -> None:
+        """WebSocket endpoint for real-time dashboard events.
+
+        Handles connection lifecycle:
+        1. Accept connection
+        2. Add to WebSocketManager pool
+        3. Keep connection alive
+        4. Handle disconnection gracefully
+        """
+        await websocket.accept()
+        await websocket_manager.add_client(websocket)
+
+        try:
+            # Keep connection alive and handle incoming messages
+            # Dashboard clients may send heartbeat responses or control messages
+            while True:
+                # Wait for messages from client (pong responses, etc.)
+                try:
+                    data = await websocket.receive_text()
+                    # Handle client messages if needed (e.g., pong responses)
+                    # For Phase 3, we primarily broadcast from server to client
+                    logger.debug(f"Received message from client: {data[:100]}")
+                except WebSocketDisconnect:
+                    logger.info("WebSocket client disconnected")
+                    break
+                except Exception as e:
+                    logger.warning(f"Error receiving WebSocket message: {e}")
+                    break
+
+        except Exception as e:
+            logger.exception(f"WebSocket endpoint error: {e}")
+        finally:
+            # Clean up: remove client from pool
+            await websocket_manager.remove_client(websocket)
+
+    if graph_assembler is not None:
+
+        @app.post("/api/dashboard/graph", response_model=GraphSnapshot)
+        async def get_dashboard_graph(request: GraphRequest) -> GraphSnapshot:
+            """Return server-side assembled dashboard graph snapshot."""
+            return await graph_assembler.build_snapshot(request)
+
+    # Static file serving
+    dashboard_dir = Path(__file__).parent.parent
+    frontend_root = dashboard_dir.parent / ("frontend_v2" if use_v2 else "frontend")
+    static_dir = dashboard_dir / ("static_v2" if use_v2 else "static")
+
+    possible_dirs = [
+        static_dir,
+        frontend_root / "dist",
+        frontend_root / "build",
+    ]
+
+    for dir_path in possible_dirs:
+        if dir_path.exists() and dir_path.is_dir():
+            logger.info(f"Mounting static files from: {dir_path}")
+            # Mount at root to serve index.html and other frontend assets
+            app.mount(
+                "/",
+                StaticFiles(directory=str(dir_path), html=True),
+                name="dashboard-static",
+            )
+            break
+    else:
+        logger.warning(
+            f"No static directory found for dashboard frontend (expected one of: {possible_dirs})."
+        )
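And a matching sketch of a client for the /ws endpoint registered above, using the third-party websockets library (not a flock-core dependency); the address is again an assumption.

import asyncio

import websockets  # third-party client library, not part of flock-core


async def listen() -> None:
    # Connect to the dashboard's event stream; the server broadcasts
    # dashboard events from WebSocketManager to all connected clients.
    async with websockets.connect("ws://localhost:8344/ws") as ws:
        while True:
            event = await ws.recv()
            print(event[:200])


asyncio.run(listen())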