flock-core 0.5.11__py3-none-any.whl → 0.5.21__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries. It is provided for informational purposes only.
- flock/__init__.py +1 -1
- flock/agent/__init__.py +30 -0
- flock/agent/builder_helpers.py +192 -0
- flock/agent/builder_validator.py +169 -0
- flock/agent/component_lifecycle.py +325 -0
- flock/agent/context_resolver.py +141 -0
- flock/agent/mcp_integration.py +212 -0
- flock/agent/output_processor.py +304 -0
- flock/api/__init__.py +20 -0
- flock/{api_models.py → api/models.py} +0 -2
- flock/{service.py → api/service.py} +3 -3
- flock/cli.py +2 -2
- flock/components/__init__.py +41 -0
- flock/components/agent/__init__.py +22 -0
- flock/{components.py → components/agent/base.py} +4 -3
- flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
- flock/components/orchestrator/__init__.py +22 -0
- flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
- flock/components/orchestrator/circuit_breaker.py +95 -0
- flock/components/orchestrator/collection.py +143 -0
- flock/components/orchestrator/deduplication.py +78 -0
- flock/core/__init__.py +30 -0
- flock/core/agent.py +953 -0
- flock/{artifacts.py → core/artifacts.py} +1 -1
- flock/{context_provider.py → core/context_provider.py} +3 -3
- flock/core/orchestrator.py +1102 -0
- flock/{store.py → core/store.py} +99 -454
- flock/{subscription.py → core/subscription.py} +1 -1
- flock/dashboard/collector.py +5 -5
- flock/dashboard/events.py +1 -1
- flock/dashboard/graph_builder.py +7 -7
- flock/dashboard/routes/__init__.py +21 -0
- flock/dashboard/routes/control.py +327 -0
- flock/dashboard/routes/helpers.py +340 -0
- flock/dashboard/routes/themes.py +76 -0
- flock/dashboard/routes/traces.py +521 -0
- flock/dashboard/routes/websocket.py +108 -0
- flock/dashboard/service.py +43 -1316
- flock/engines/dspy/__init__.py +20 -0
- flock/engines/dspy/artifact_materializer.py +216 -0
- flock/engines/dspy/signature_builder.py +474 -0
- flock/engines/dspy/streaming_executor.py +812 -0
- flock/engines/dspy_engine.py +45 -1330
- flock/engines/examples/simple_batch_engine.py +2 -2
- flock/engines/streaming/__init__.py +3 -0
- flock/engines/streaming/sinks.py +489 -0
- flock/examples.py +7 -7
- flock/logging/logging.py +1 -16
- flock/models/__init__.py +10 -0
- flock/orchestrator/__init__.py +45 -0
- flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
- flock/orchestrator/artifact_manager.py +168 -0
- flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
- flock/orchestrator/component_runner.py +389 -0
- flock/orchestrator/context_builder.py +167 -0
- flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
- flock/orchestrator/event_emitter.py +167 -0
- flock/orchestrator/initialization.py +184 -0
- flock/orchestrator/lifecycle_manager.py +226 -0
- flock/orchestrator/mcp_manager.py +202 -0
- flock/orchestrator/scheduler.py +189 -0
- flock/orchestrator/server_manager.py +234 -0
- flock/orchestrator/tracing.py +147 -0
- flock/storage/__init__.py +10 -0
- flock/storage/artifact_aggregator.py +158 -0
- flock/storage/in_memory/__init__.py +6 -0
- flock/storage/in_memory/artifact_filter.py +114 -0
- flock/storage/in_memory/history_aggregator.py +115 -0
- flock/storage/sqlite/__init__.py +10 -0
- flock/storage/sqlite/agent_history_queries.py +154 -0
- flock/storage/sqlite/consumption_loader.py +100 -0
- flock/storage/sqlite/query_builder.py +112 -0
- flock/storage/sqlite/query_params_builder.py +91 -0
- flock/storage/sqlite/schema_manager.py +168 -0
- flock/storage/sqlite/summary_queries.py +194 -0
- flock/utils/__init__.py +14 -0
- flock/utils/async_utils.py +67 -0
- flock/{runtime.py → utils/runtime.py} +3 -3
- flock/utils/time_utils.py +53 -0
- flock/utils/type_resolution.py +38 -0
- flock/{utilities.py → utils/utilities.py} +2 -2
- flock/utils/validation.py +57 -0
- flock/utils/visibility.py +79 -0
- flock/utils/visibility_utils.py +134 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/METADATA +19 -5
- {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/RECORD +92 -34
- flock/agent.py +0 -1578
- flock/orchestrator.py +0 -1983
- /flock/{visibility.py → core/visibility.py} +0 -0
- /flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
- /flock/{helper → utils}/cli_helper.py +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/WHEEL +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/licenses/LICENSE +0 -0
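Most of the churn in this release is a package reorganization: former top-level modules move under flock.core, flock.api, flock.components, flock.orchestrator, flock.storage, and flock.utils, while the monolithic flock/agent.py, flock/orchestrator.py, flock/dashboard/service.py, and flock/engines/dspy_engine.py are split into smaller modules. The sketch below shows how downstream imports shift between the two versions; it is inferred from the rename list above and from the import changes in the flock/dashboard/service.py diff that follows, and assumes flock-core 0.5.21 is installed.

```python
# Import-path migration sketch (inferred from the rename list above and the
# service.py import diff below; not an exhaustive mapping).

# flock-core 0.5.11:
#   from flock.orchestrator import Flock
#   from flock.service import BlackboardHTTPService

# flock-core 0.5.21:
from flock.api.service import BlackboardHTTPService
from flock.core import Flock
```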
flock/dashboard/service.py
CHANGED
@@ -8,27 +8,22 @@ Provides real-time dashboard capabilities by:
 """
 
 import os
-from datetime import UTC, datetime, timedelta
-from importlib.metadata import PackageNotFoundError, version
-from pathlib import Path
 from typing import Any
-from uuid import uuid4
 
-from fastapi import HTTPException, WebSocket, WebSocketDisconnect
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
-from pydantic import ValidationError
 
-from flock.…
+from flock.api.service import BlackboardHTTPService
+from flock.core import Flock
 from flock.dashboard.collector import DashboardEventCollector
-from flock.dashboard.events import MessagePublishedEvent, VisibilitySpec
 from flock.dashboard.graph_builder import GraphAssembler
-from flock.dashboard.…
+from flock.dashboard.routes import (
+    register_control_routes,
+    register_theme_routes,
+    register_trace_routes,
+    register_websocket_routes,
+)
 from flock.dashboard.websocket import WebSocketManager
 from flock.logging.logging import get_logger
-from flock.orchestrator import Flock
-from flock.registry import type_registry
-from flock.service import BlackboardHTTPService
 
 
 logger = get_logger("dashboard.service")
@@ -58,22 +53,11 @@ class DashboardHTTPService(BlackboardHTTPService):
             orchestrator: Flock orchestrator instance
             websocket_manager: Optional WebSocketManager (creates new if not provided)
             event_collector: Optional DashboardEventCollector (creates new if not provided)
+            use_v2: Whether to use v2 dashboard frontend
         """
         # Initialize base service
         super().__init__(orchestrator)
 
-        # Add dashboard-specific tags to OpenAPI
-        self.app.openapi_tags.extend([
-            {
-                "name": "Dashboard UI",
-                "description": "**Internal endpoints** used by the Flock dashboard UI. Not intended for direct use.",
-            },
-            {
-                "name": "Schema Discovery",
-                "description": "Endpoints for discovering available artifact types and their schemas.",
-            },
-        ])
-
         # Initialize WebSocket manager and event collector
         self.websocket_manager = websocket_manager or WebSocketManager()
         self.event_collector = event_collector or DashboardEventCollector(
@@ -102,978 +86,49 @@ class DashboardHTTPService(BlackboardHTTPService):
 
         # IMPORTANT: Register API routes BEFORE static files!
         # Static file mount acts as catch-all and must be last
-        self._register_control_routes()
-        self._register_theme_routes()
-        self._register_dashboard_routes()
+        self._register_all_routes()
 
         logger.info("DashboardHTTPService initialized")
 
+    def _register_all_routes(self) -> None:
+        """Register all dashboard routes using route modules.
+
+        Routes are organized into focused modules:
+        - control: Control API endpoints (publish, invoke, agents, etc.)
+        - traces: Trace-related endpoints (OpenTelemetry, history, etc.)
+        - themes: Theme management endpoints
+        - websocket: WebSocket and real-time dashboard endpoints
+
+        Route registration order matters - static files must be last!
+        """
+        # Register control routes (artifact types, agents, version, publish, invoke)
+        register_control_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
+        )
+
+        # Register trace routes (traces, services, stats, query, streaming, history)
+        register_trace_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
+        )
+
+        # Register theme routes (list, get)
+        register_theme_routes(app=self.app)
+
+        # Register WebSocket endpoint and static files (must be last!)
+        register_websocket_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
+            graph_assembler=self.graph_assembler,
+            use_v2=self.use_v2,
+        )

(The remaining ~960 removed lines in this hunk were the previous inline implementations, now superseded by the flock/dashboard/routes modules: the /ws WebSocket endpoint, the /api/dashboard/graph snapshot endpoint, static-file discovery and mounting, the control endpoints (artifact type schema discovery, /api/agents with logic-operations state, /api/version, /api/control/publish, /api/control/invoke, and the /api/control/pause and /api/control/resume placeholders), the DuckDB-backed trace endpoints (/api/traces, /api/traces/services, /api/traces/clear, /api/traces/query, /api/traces/stats), the history endpoints (/api/streaming-history/{agent_name}, /api/artifacts/history/{node_id}, /api/agents/{agent_id}/runs), and the theme endpoints (/api/themes, /api/themes/{theme_name}).)
@@ -1103,332 +158,4 @@ class DashboardHTTPService(BlackboardHTTPService):
         return self.app
 
 

(The ~330 removed lines that followed were the module-level helper functions _get_correlation_groups, _get_batch_state, _compute_agent_status, and _build_logic_config, which computed JoinSpec/BatchSpec waiting state and agent status for the old inline /api/agents endpoint.)

 __all__ = ["DashboardHTTPService"]
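The new flock/dashboard/routes modules follow a plain register-function pattern: each module exposes a register_*_routes function that receives the shared FastAPI app plus the orchestrator, WebSocket manager, and event collector, and attaches its endpoints. The sketch below illustrates that pattern; the keyword arguments mirror the calls in _register_all_routes above, but the endpoint body is illustrative and is not the actual content of flock/dashboard/routes/control.py.

```python
# Sketch of the register-function pattern used by the extracted route modules.
# The signature mirrors _register_all_routes above; the endpoint body is
# illustrative only.
from typing import Any

from fastapi import FastAPI


def register_control_routes(
    app: FastAPI,
    orchestrator: Any,
    websocket_manager: Any,
    event_collector: Any,
) -> None:
    """Attach the dashboard control endpoints to the shared app."""

    @app.get("/api/version", tags=["Dashboard UI"])
    async def get_version() -> dict[str, str]:
        # The real handler resolves the installed package version
        # (see the removed inline implementation in the diff above).
        return {"backend_version": "unknown", "package_name": "flock-flow"}
```

Keeping registration in free functions rather than methods lets DashboardHTTPService act as a thin composition layer: service.py shrinks from roughly 1,400 lines to about 160, consistent with the +43/-1316 stats listed above.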