flock-core 0.5.10-py3-none-any.whl → 0.5.20-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (91)
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/api/models.py +283 -0
  11. flock/{service.py → api/service.py} +121 -63
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/graph_builder.py +7 -7
  31. flock/dashboard/routes/__init__.py +21 -0
  32. flock/dashboard/routes/control.py +327 -0
  33. flock/dashboard/routes/helpers.py +340 -0
  34. flock/dashboard/routes/themes.py +76 -0
  35. flock/dashboard/routes/traces.py +521 -0
  36. flock/dashboard/routes/websocket.py +108 -0
  37. flock/dashboard/service.py +44 -1294
  38. flock/engines/dspy/__init__.py +20 -0
  39. flock/engines/dspy/artifact_materializer.py +216 -0
  40. flock/engines/dspy/signature_builder.py +474 -0
  41. flock/engines/dspy/streaming_executor.py +858 -0
  42. flock/engines/dspy_engine.py +45 -1330
  43. flock/engines/examples/simple_batch_engine.py +2 -2
  44. flock/examples.py +7 -7
  45. flock/logging/logging.py +1 -16
  46. flock/models/__init__.py +10 -0
  47. flock/models/system_artifacts.py +33 -0
  48. flock/orchestrator/__init__.py +45 -0
  49. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  50. flock/orchestrator/artifact_manager.py +168 -0
  51. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  52. flock/orchestrator/component_runner.py +389 -0
  53. flock/orchestrator/context_builder.py +167 -0
  54. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  55. flock/orchestrator/event_emitter.py +167 -0
  56. flock/orchestrator/initialization.py +184 -0
  57. flock/orchestrator/lifecycle_manager.py +226 -0
  58. flock/orchestrator/mcp_manager.py +202 -0
  59. flock/orchestrator/scheduler.py +189 -0
  60. flock/orchestrator/server_manager.py +234 -0
  61. flock/orchestrator/tracing.py +147 -0
  62. flock/storage/__init__.py +10 -0
  63. flock/storage/artifact_aggregator.py +158 -0
  64. flock/storage/in_memory/__init__.py +6 -0
  65. flock/storage/in_memory/artifact_filter.py +114 -0
  66. flock/storage/in_memory/history_aggregator.py +115 -0
  67. flock/storage/sqlite/__init__.py +10 -0
  68. flock/storage/sqlite/agent_history_queries.py +154 -0
  69. flock/storage/sqlite/consumption_loader.py +100 -0
  70. flock/storage/sqlite/query_builder.py +112 -0
  71. flock/storage/sqlite/query_params_builder.py +91 -0
  72. flock/storage/sqlite/schema_manager.py +168 -0
  73. flock/storage/sqlite/summary_queries.py +194 -0
  74. flock/utils/__init__.py +14 -0
  75. flock/utils/async_utils.py +67 -0
  76. flock/{runtime.py → utils/runtime.py} +3 -3
  77. flock/utils/time_utils.py +53 -0
  78. flock/utils/type_resolution.py +38 -0
  79. flock/{utilities.py → utils/utilities.py} +2 -2
  80. flock/utils/validation.py +57 -0
  81. flock/utils/visibility.py +79 -0
  82. flock/utils/visibility_utils.py +134 -0
  83. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/METADATA +69 -61
  84. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -31
  85. flock/agent.py +0 -1578
  86. flock/orchestrator.py +0 -1746
  87. /flock/{visibility.py → core/visibility.py} +0 -0
  88. /flock/{helper → utils}/cli_helper.py +0 -0
  89. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
  90. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
  91. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
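
The headline change in 0.5.20 is structural: the monolithic `flock/agent.py` (-1578 lines) and `flock/orchestrator.py` (-1746 lines) are deleted, and their contents are redistributed across new `flock/agent/`, `flock/api/`, `flock/components/`, `flock/core/`, `flock/orchestrator/`, `flock/storage/`, and `flock/utils/` packages. The expanded diff that follows is for `flock/dashboard/service.py` (entry 37, +44 -1294), which shows the migration from a downstream consumer's point of view. Two import-path moves are confirmed directly by that diff and are sketched here; the other renames in the table above presumably shift the same way, but only these two appear verbatim:

```python
# Import paths removed from flock/dashboard/service.py in 0.5.20:
#   from flock.orchestrator import Flock
#   from flock.service import BlackboardHTTPService

# Import paths added in their place:
from flock.api.service import BlackboardHTTPService
from flock.core import Flock
```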
@@ -8,26 +8,22 @@ Provides real-time dashboard capabilities by:
 """
 
 import os
-from datetime import UTC, datetime, timedelta
-from importlib.metadata import PackageNotFoundError, version
-from pathlib import Path
 from typing import Any
-from uuid import uuid4
 
-from fastapi import HTTPException, WebSocket, WebSocketDisconnect
 from fastapi.middleware.cors import CORSMiddleware
-from fastapi.staticfiles import StaticFiles
-from pydantic import ValidationError
 
+from flock.api.service import BlackboardHTTPService
+from flock.core import Flock
 from flock.dashboard.collector import DashboardEventCollector
-from flock.dashboard.events import MessagePublishedEvent, VisibilitySpec
 from flock.dashboard.graph_builder import GraphAssembler
-from flock.dashboard.models.graph import GraphRequest, GraphSnapshot
+from flock.dashboard.routes import (
+    register_control_routes,
+    register_theme_routes,
+    register_trace_routes,
+    register_websocket_routes,
+)
 from flock.dashboard.websocket import WebSocketManager
 from flock.logging.logging import get_logger
-from flock.orchestrator import Flock
-from flock.registry import type_registry
-from flock.service import BlackboardHTTPService
 
 
 logger = get_logger("dashboard.service")
@@ -57,6 +53,7 @@ class DashboardHTTPService(BlackboardHTTPService):
             orchestrator: Flock orchestrator instance
             websocket_manager: Optional WebSocketManager (creates new if not provided)
             event_collector: Optional DashboardEventCollector (creates new if not provided)
+            use_v2: Whether to use v2 dashboard frontend
         """
         # Initialize base service
         super().__init__(orchestrator)
@@ -89,968 +86,49 @@ class DashboardHTTPService(BlackboardHTTPService):
 
         # IMPORTANT: Register API routes BEFORE static files!
         # Static file mount acts as catch-all and must be last
-        self._register_control_routes()
-        self._register_theme_routes()
-        self._register_dashboard_routes()
+        self._register_all_routes()
 
         logger.info("DashboardHTTPService initialized")
 
-    def _register_dashboard_routes(self) -> None:
-        """Register WebSocket endpoint and static file serving."""
-        app = self.app
+    def _register_all_routes(self) -> None:
+        """Register all dashboard routes using route modules.
 
-        @app.websocket("/ws")
-        async def websocket_endpoint(websocket: WebSocket) -> None:
-            """WebSocket endpoint for real-time dashboard events.
+        Routes are organized into focused modules:
+        - control: Control API endpoints (publish, invoke, agents, etc.)
+        - traces: Trace-related endpoints (OpenTelemetry, history, etc.)
+        - themes: Theme management endpoints
+        - websocket: WebSocket and real-time dashboard endpoints
 
-            Handles connection lifecycle:
-            1. Accept connection
-            2. Add to WebSocketManager pool
-            3. Keep connection alive
-            4. Handle disconnection gracefully
-            """
-            await websocket.accept()
-            await self.websocket_manager.add_client(websocket)
-
-            try:
-                # Keep connection alive and handle incoming messages
-                # Dashboard clients may send heartbeat responses or control messages
-                while True:
-                    # Wait for messages from client (pong responses, etc.)
-                    try:
-                        data = await websocket.receive_text()
-                        # Handle client messages if needed (e.g., pong responses)
-                        # For Phase 3, we primarily broadcast from server to client
-                        logger.debug(f"Received message from client: {data[:100]}")
-                    except WebSocketDisconnect:
-                        logger.info("WebSocket client disconnected")
-                        break
-                    except Exception as e:
-                        logger.warning(f"Error receiving WebSocket message: {e}")
-                        break
-
-            except Exception as e:
-                logger.exception(f"WebSocket endpoint error: {e}")
-            finally:
-                # Clean up: remove client from pool
-                await self.websocket_manager.remove_client(websocket)
-
-        if self.graph_assembler is not None:
-
-            @app.post("/api/dashboard/graph", response_model=GraphSnapshot)
-            async def get_dashboard_graph(request: GraphRequest) -> GraphSnapshot:
-                """Return server-side assembled dashboard graph snapshot."""
-                return await self.graph_assembler.build_snapshot(request)
-
-        dashboard_dir = Path(__file__).parent
-        frontend_root = dashboard_dir.parent / (
-            "frontend_v2" if self.use_v2 else "frontend"
+        Route registration order matters - static files must be last!
+        """
+        # Register control routes (artifact types, agents, version, publish, invoke)
+        register_control_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
         )
-        static_dir = dashboard_dir / ("static_v2" if self.use_v2 else "static")
-
-        possible_dirs = [
-            static_dir,
-            frontend_root / "dist",
-            frontend_root / "build",
-        ]
-
-        for dir_path in possible_dirs:
-            if dir_path.exists() and dir_path.is_dir():
-                logger.info(f"Mounting static files from: {dir_path}")
-                # Mount at root to serve index.html and other frontend assets
-                app.mount(
-                    "/",
-                    StaticFiles(directory=str(dir_path), html=True),
-                    name="dashboard-static",
-                )
-                break
-        else:
-            logger.warning(
-                f"No static directory found for dashboard frontend (expected one of: {possible_dirs})."
-            )
-
-    def _register_control_routes(self) -> None:
-        """Register control API endpoints for dashboard operations."""
-        app = self.app
-        orchestrator = self.orchestrator
-
-        @app.get("/api/artifact-types")
-        async def get_artifact_types() -> dict[str, Any]:
-            """Get all registered artifact types with their schemas.
-
-            Returns:
-                {
-                    "artifact_types": [
-                        {
-                            "name": "TypeName",
-                            "schema": {...}
-                        },
-                        ...
-                    ]
-                }
-            """
-            artifact_types = []
-
-            for type_name in type_registry._by_name:
-                try:
-                    model_class = type_registry.resolve(type_name)
-                    # Get Pydantic schema
-                    schema = model_class.model_json_schema()
-                    artifact_types.append({"name": type_name, "schema": schema})
-                except Exception as e:
-                    logger.warning(f"Could not get schema for {type_name}: {e}")
-
-            return {"artifact_types": artifact_types}
-
-        @app.get("/api/agents")
-        async def get_agents() -> dict[str, Any]:
-            """Get all registered agents with logic operations state.
-
-            Phase 1.2 Enhancement: Now includes logic_operations configuration
-            and waiting state for agents using JoinSpec or BatchSpec.
-
-            Returns:
-                {
-                    "agents": [
-                        {
-                            "name": "agent_name",
-                            "description": "...",
-                            "status": "ready" | "waiting" | "active",
-                            "subscriptions": ["TypeA", "TypeB"],
-                            "output_types": ["TypeC", "TypeD"],
-                            "logic_operations": [  # NEW: Phase 1.2
-                                {
-                                    "subscription_index": 0,
-                                    "subscription_types": ["TypeA", "TypeB"],
-                                    "join": {...},  # JoinSpec config
-                                    "batch": {...},  # BatchSpec config
-                                    "waiting_state": {...}  # Current state
-                                }
-                            ]
-                        },
-                        ...
-                    ]
-                }
-            """
-            agents = []
-
-            for agent in orchestrator.agents:
-                # Extract consumed types from agent subscriptions
-                consumed_types = []
-                for sub in agent.subscriptions:
-                    consumed_types.extend(sub.type_names)
-
-                # Extract produced types from agent outputs
-                produced_types = [output.spec.type_name for output in agent.outputs]
-
-                # NEW Phase 1.2: Logic operations configuration
-                logic_operations = []
-                for idx, subscription in enumerate(agent.subscriptions):
-                    logic_config = _build_logic_config(
-                        agent, subscription, idx, orchestrator
-                    )
-                    if logic_config:  # Only include if has join/batch
-                        logic_operations.append(logic_config)
-
-                agent_data = {
-                    "name": agent.name,
-                    "description": agent.description or "",
-                    "status": _compute_agent_status(
-                        agent, orchestrator
-                    ),  # NEW: Dynamic status
-                    "subscriptions": consumed_types,
-                    "output_types": produced_types,
-                }
-
-                if logic_operations:
-                    agent_data["logic_operations"] = logic_operations
-
-                agents.append(agent_data)
-
-            return {"agents": agents}
-
-        @app.get("/api/version")
-        async def get_version() -> dict[str, str]:
-            """Get version information for the backend and dashboard.
-
-            Returns:
-                {
-                    "backend_version": "0.1.18",
-                    "package_name": "flock-flow"
-                }
-            """
-            try:
-                backend_version = version("flock-flow")
-            except PackageNotFoundError:
-                # Fallback version if package not installed
-                backend_version = "0.2.0-dev"
-
-            return {"backend_version": backend_version, "package_name": "flock-flow"}
-
-        @app.post("/api/control/publish")
-        async def publish_artifact(body: dict[str, Any]) -> dict[str, str]:
-            """Publish artifact with correlation tracking.
-
-            Request body:
-                {
-                    "artifact_type": "TypeName",
-                    "content": {"field": "value", ...}
-                }
-
-            Returns:
-                {
-                    "correlation_id": "<uuid>",
-                    "published_at": "<iso-timestamp>"
-                }
-            """
-            # Validate required fields
-            artifact_type = body.get("artifact_type")
-            content = body.get("content")
-
-            if not artifact_type:
-                raise HTTPException(status_code=400, detail="artifact_type is required")
-            if content is None:
-                raise HTTPException(status_code=400, detail="content is required")
-
-            try:
-                # Resolve type from registry
-                model_class = type_registry.resolve(artifact_type)
-
-                # Validate content against Pydantic schema
-                try:
-                    instance = model_class(**content)
-                except ValidationError as e:
-                    raise HTTPException(
-                        status_code=422, detail=f"Validation error: {e!s}"
-                    )
-
-                # Generate correlation ID
-                correlation_id = str(uuid4())
-
-                # Publish to orchestrator
-                artifact = await orchestrator.publish(
-                    instance, correlation_id=correlation_id, is_dashboard=True
-                )
-
-                # Phase 11 Fix: Emit message_published event for dashboard visibility
-                # This enables virtual "orchestrator" agent to appear in both Agent View and Blackboard View
-                event = MessagePublishedEvent(
-                    correlation_id=str(artifact.correlation_id),
-                    artifact_id=str(artifact.id),
-                    artifact_type=artifact.type,
-                    produced_by=artifact.produced_by,  # Will be "orchestrator" or similar for non-agent publishers
-                    payload=artifact.payload,
-                    visibility=VisibilitySpec(
-                        kind="Public"
-                    ),  # Dashboard-published artifacts are public by default
-                    tags=list(artifact.tags) if artifact.tags else [],
-                    partition_key=artifact.partition_key,
-                    version=artifact.version,
-                    consumers=[],  # Will be populated by subscription matching in frontend
-                )
-                await self.websocket_manager.broadcast(event)
-
-                return {
-                    "correlation_id": str(artifact.correlation_id),
-                    "published_at": artifact.created_at.isoformat(),
-                }
-
-            except KeyError:
-                raise HTTPException(
-                    status_code=422, detail=f"Unknown artifact type: {artifact_type}"
-                )
-            except Exception as e:
-                logger.exception(f"Error publishing artifact: {e}")
-                raise HTTPException(status_code=500, detail=str(e))
-
-        @app.post("/api/control/invoke")
-        async def invoke_agent(body: dict[str, Any]) -> dict[str, Any]:
-            """Directly invoke a specific agent.
-
-            Request body:
-                {
-                    "agent_name": "agent_name",
-                    "input": {"type": "TypeName", "field": "value", ...}
-                }
-
-            Returns:
-                {
-                    "invocation_id": "<uuid>",
-                    "result": "success"
-                }
-            """
-            # Validate required fields
-            agent_name = body.get("agent_name")
-            input_data = body.get("input")
-
-            if not agent_name:
-                raise HTTPException(status_code=400, detail="agent_name is required")
-            if input_data is None:
-                raise HTTPException(status_code=400, detail="input is required")
-
-            try:
-                # Get agent from orchestrator
-                agent = orchestrator.get_agent(agent_name)
-            except KeyError:
-                raise HTTPException(
-                    status_code=404, detail=f"Agent not found: {agent_name}"
-                )
-
-            try:
-                # Parse input type and create instance
-                input_type = input_data.get("type")
-                if not input_type:
-                    raise HTTPException(
-                        status_code=400, detail="input.type is required"
-                    )
-
-                # Resolve type from registry
-                model_class = type_registry.resolve(input_type)
-
-                # Create payload by removing 'type' key
-                payload = {k: v for k, v in input_data.items() if k != "type"}
-
-                # Validate and create instance
-                try:
-                    instance = model_class(**payload)
-                except ValidationError as e:
-                    raise HTTPException(
-                        status_code=422, detail=f"Validation error: {e!s}"
-                    )
-
-                # Invoke agent
-                outputs = await orchestrator.invoke(agent, instance)
-
-                # Generate invocation ID from first output or create new UUID
-                invocation_id = str(outputs[0].id) if outputs else str(uuid4())
-
-                # Extract correlation_id from first output (for filter automation)
-                correlation_id = (
-                    str(outputs[0].correlation_id)
-                    if outputs and outputs[0].correlation_id
-                    else None
-                )
-
-                return {
-                    "invocation_id": invocation_id,
-                    "correlation_id": correlation_id,
-                    "result": "success",
-                }
-
-            except HTTPException:
-                raise
-            except KeyError:
-                raise HTTPException(
-                    status_code=422, detail=f"Unknown type: {input_type}"
-                )
-            except Exception as e:
-                logger.exception(f"Error invoking agent: {e}")
-                raise HTTPException(status_code=500, detail=str(e))
-
-        @app.post("/api/control/pause")
-        async def pause_orchestrator() -> dict[str, Any]:
-            """Pause orchestrator (placeholder).
-
-            Returns:
-                501 Not Implemented
-            """
-            raise HTTPException(
-                status_code=501, detail="Pause functionality coming in Phase 12"
-            )
-
-        @app.post("/api/control/resume")
-        async def resume_orchestrator() -> dict[str, Any]:
-            """Resume orchestrator (placeholder).
-
-            Returns:
-                501 Not Implemented
-            """
-            raise HTTPException(
-                status_code=501, detail="Resume functionality coming in Phase 12"
-            )
-
-        @app.get("/api/traces")
-        async def get_traces() -> list[dict[str, Any]]:
-            """Get OpenTelemetry traces from DuckDB.
-
-            Returns list of trace spans in OTEL format.
-
-            Returns:
-                [
-                    {
-                        "name": "Agent.execute",
-                        "context": {
-                            "trace_id": "...",
-                            "span_id": "...",
-                            ...
-                        },
-                        "start_time": 1234567890,
-                        "end_time": 1234567891,
-                        "attributes": {...},
-                        "status": {...}
-                    },
-                    ...
-                ]
-            """
-            import json
-            from pathlib import Path
-
-            import duckdb
-
-            db_path = Path(".flock/traces.duckdb")
-
-            if not db_path.exists():
-                logger.warning(
-                    "Trace database not found. Make sure FLOCK_AUTO_TRACE=true FLOCK_TRACE_FILE=true"
-                )
-                return []
-
-            try:
-                with duckdb.connect(str(db_path), read_only=True) as conn:
-                    # Query all spans from DuckDB
-                    result = conn.execute("""
-                        SELECT
-                            trace_id, span_id, parent_id, name, service, operation,
-                            kind, start_time, end_time, duration_ms,
-                            status_code, status_description,
-                            attributes, events, links, resource
-                        FROM spans
-                        ORDER BY start_time DESC
-                    """).fetchall()
-
-                    spans = []
-                    for row in result:
-                        # Reconstruct OTEL span format from DuckDB row
-                        span = {
-                            "name": row[3],  # name
-                            "context": {
-                                "trace_id": row[0],  # trace_id
-                                "span_id": row[1],  # span_id
-                                "trace_flags": 0,
-                                "trace_state": "",
-                            },
-                            "kind": row[6],  # kind
-                            "start_time": row[7],  # start_time
-                            "end_time": row[8],  # end_time
-                            "status": {
-                                "status_code": row[10],  # status_code
-                                "description": row[11],  # status_description
-                            },
-                            "attributes": json.loads(row[12])
-                            if row[12]
-                            else {},  # attributes
-                            "events": json.loads(row[13]) if row[13] else [],  # events
-                            "links": json.loads(row[14]) if row[14] else [],  # links
-                            "resource": json.loads(row[15])
-                            if row[15]
-                            else {},  # resource
-                        }
-
-                        # Add parent_id if exists
-                        if row[2]:  # parent_id
-                            span["parent_id"] = row[2]
-
-                        spans.append(span)
-
-                    logger.debug(f"Loaded {len(spans)} spans from DuckDB")
-                    return spans
-
-            except Exception as e:
-                logger.exception(f"Error reading traces from DuckDB: {e}")
-                return []
-
-        @app.get("/api/traces/services")
-        async def get_trace_services() -> dict[str, Any]:
-            """Get list of unique services that have been traced.
-
-            Returns:
-                {
-                    "services": ["Flock", "Agent", "DSPyEngine", ...],
-                    "operations": ["Flock.publish", "Agent.execute", ...]
-                }
-            """
-            from pathlib import Path
-
-            import duckdb
-
-            db_path = Path(".flock/traces.duckdb")
-
-            if not db_path.exists():
-                return {"services": [], "operations": []}
-
-            try:
-                with duckdb.connect(str(db_path), read_only=True) as conn:
-                    # Get unique services
-                    services_result = conn.execute("""
-                        SELECT DISTINCT service
-                        FROM spans
-                        WHERE service IS NOT NULL
-                        ORDER BY service
-                    """).fetchall()
-
-                    # Get unique operations
-                    operations_result = conn.execute("""
-                        SELECT DISTINCT name
-                        FROM spans
-                        WHERE name IS NOT NULL
-                        ORDER BY name
-                    """).fetchall()
-
-                    return {
-                        "services": [row[0] for row in services_result],
-                        "operations": [row[0] for row in operations_result],
-                    }
-
-            except Exception as e:
-                logger.exception(f"Error reading trace services: {e}")
-                return {"services": [], "operations": []}
-
-        @app.post("/api/traces/clear")
-        async def clear_traces() -> dict[str, Any]:
-            """Clear all traces from DuckDB database.
-
-            Returns:
-                {
-                    "success": true,
-                    "deleted_count": 123,
-                    "error": null
-                }
-            """
-            result = Flock.clear_traces()
-            if result["success"]:
-                logger.info(f"Cleared {result['deleted_count']} trace spans via API")
-            else:
-                logger.error(f"Failed to clear traces: {result['error']}")
-
-            return result
-
-        @app.post("/api/traces/query")
-        async def execute_trace_query(request: dict[str, Any]) -> dict[str, Any]:
-            """
-            Execute a DuckDB SQL query on the traces database.
-
-            Security: Only SELECT queries allowed, rate-limited.
-            """
-            from pathlib import Path
-
-            import duckdb
-
-            query = request.get("query", "").strip()
-
-            if not query:
-                return {"error": "Query cannot be empty", "results": [], "columns": []}
-
-            # Security: Only allow SELECT queries
-            query_upper = query.upper().strip()
-            if not query_upper.startswith("SELECT"):
-                return {
-                    "error": "Only SELECT queries are allowed",
-                    "results": [],
-                    "columns": [],
-                }
-
-            # Check for dangerous keywords
-            dangerous = [
-                "DROP",
-                "DELETE",
-                "INSERT",
-                "UPDATE",
-                "ALTER",
-                "CREATE",
-                "TRUNCATE",
-            ]
-            if any(keyword in query_upper for keyword in dangerous):
-                return {
-                    "error": "Query contains forbidden operations",
-                    "results": [],
-                    "columns": [],
-                }
-
-            db_path = Path(".flock/traces.duckdb")
-            if not db_path.exists():
-                return {
-                    "error": "Trace database not found",
-                    "results": [],
-                    "columns": [],
-                }
-
-            try:
-                with duckdb.connect(str(db_path), read_only=True) as conn:
-                    result = conn.execute(query).fetchall()
-                    columns = (
-                        [desc[0] for desc in conn.description]
-                        if conn.description
-                        else []
-                    )
-
-                    # Convert to JSON-serializable format
-                    results = []
-                    for row in result:
-                        row_dict = {}
-                        for i, col in enumerate(columns):
-                            val = row[i]
-                            # Convert bytes to string, handle other types
-                            if isinstance(val, bytes):
-                                row_dict[col] = val.decode("utf-8")
-                            else:
-                                row_dict[col] = val
-                        results.append(row_dict)
-
-                    return {
-                        "results": results,
-                        "columns": columns,
-                        "row_count": len(results),
-                    }
-            except Exception as e:
-                logger.exception(f"DuckDB query error: {e}")
-                return {"error": str(e), "results": [], "columns": []}
-
-        @app.get("/api/traces/stats")
-        async def get_trace_stats() -> dict[str, Any]:
-            """Get statistics about the trace database.
-
-            Returns:
-                {
-                    "total_spans": 123,
-                    "total_traces": 45,
-                    "services_count": 5,
-                    "oldest_trace": "2025-10-07T12:00:00Z",
-                    "newest_trace": "2025-10-07T14:30:00Z",
-                    "database_size_mb": 12.5
-                }
-            """
-            from datetime import datetime
-            from pathlib import Path
-
-            import duckdb
-
-            db_path = Path(".flock/traces.duckdb")
-
-            if not db_path.exists():
-                return {
-                    "total_spans": 0,
-                    "total_traces": 0,
-                    "services_count": 0,
-                    "oldest_trace": None,
-                    "newest_trace": None,
-                    "database_size_mb": 0,
-                }
-
-            try:
-                with duckdb.connect(str(db_path), read_only=True) as conn:
-                    # Get total spans
-                    total_spans = conn.execute("SELECT COUNT(*) FROM spans").fetchone()[
-                        0
-                    ]
-
-                    # Get total unique traces
-                    total_traces = conn.execute(
-                        "SELECT COUNT(DISTINCT trace_id) FROM spans"
-                    ).fetchone()[0]
-
-                    # Get services count
-                    services_count = conn.execute(
-                        "SELECT COUNT(DISTINCT service) FROM spans WHERE service IS NOT NULL"
-                    ).fetchone()[0]
-
-                    # Get time range
-                    time_range = conn.execute("""
-                        SELECT
-                            MIN(start_time) as oldest,
-                            MAX(start_time) as newest
-                        FROM spans
-                    """).fetchone()
 
-                    oldest_trace = None
-                    newest_trace = None
-                    if time_range and time_range[0]:
-                        # Convert nanoseconds to datetime
-                        oldest_trace = datetime.fromtimestamp(
-                            time_range[0] / 1_000_000_000, tz=UTC
-                        ).isoformat()
-                        newest_trace = datetime.fromtimestamp(
-                            time_range[1] / 1_000_000_000, tz=UTC
-                        ).isoformat()
-
-                    # Get file size
-                    size_mb = db_path.stat().st_size / (1024 * 1024)
-
-                    return {
-                        "total_spans": total_spans,
-                        "total_traces": total_traces,
-                        "services_count": services_count,
-                        "oldest_trace": oldest_trace,
-                        "newest_trace": newest_trace,
-                        "database_size_mb": round(size_mb, 2),
-                    }
-
-            except Exception as e:
-                logger.exception(f"Error reading trace stats: {e}")
-                return {
-                    "total_spans": 0,
-                    "total_traces": 0,
-                    "services_count": 0,
-                    "oldest_trace": None,
-                    "newest_trace": None,
-                    "database_size_mb": 0,
-                }
-
-        @app.get("/api/streaming-history/{agent_name}")
-        async def get_streaming_history(agent_name: str) -> dict[str, Any]:
-            """Get historical streaming output for a specific agent.
-
-            Args:
-                agent_name: Name of the agent to get streaming history for
-
-            Returns:
-                {
-                    "agent_name": "agent_name",
-                    "events": [
-                        {
-                            "correlation_id": "...",
-                            "timestamp": "...",
-                            "agent_name": "...",
-                            "run_id": "...",
-                            "output_type": "llm_token",
-                            "content": "...",
-                            "sequence": 0,
-                            "is_final": false
-                        },
-                        ...
-                    ]
-                }
-            """
-            try:
-                history = self.websocket_manager.get_streaming_history(agent_name)
-                return {
-                    "agent_name": agent_name,
-                    "events": [event.model_dump() for event in history],
-                }
-            except Exception as e:
-                logger.exception(
-                    f"Failed to get streaming history for {agent_name}: {e}"
-                )
-                raise HTTPException(
-                    status_code=500, detail=f"Failed to get streaming history: {e!s}"
-                )
-
-        @app.get("/api/artifacts/history/{node_id}")
-        async def get_message_history(node_id: str) -> dict[str, Any]:
-            """Get complete message history for a node (both produced and consumed).
-
-            Phase 4.1 Feature Gap Fix: Returns both messages produced by AND consumed by
-            the specified node, enabling complete message history view in MessageHistoryTab.
-
-            Args:
-                node_id: ID of the node (agent name or message ID)
-
-            Returns:
-                {
-                    "node_id": "agent_name",
-                    "messages": [
-                        {
-                            "id": "artifact-uuid",
-                            "type": "ArtifactType",
-                            "direction": "published"|"consumed",
-                            "payload": {...},
-                            "timestamp": "2025-10-11T...",
-                            "correlation_id": "uuid",
-                            "produced_by": "producer_name",
-                            "consumed_at": "2025-10-11T..." (only for consumed)
-                        },
-                        ...
-                    ],
-                    "total": 123
-                }
-            """
-            try:
-                from flock.store import FilterConfig
-
-                messages = []
-
-                # 1. Get messages PRODUCED by this node
-                produced_filter = FilterConfig(produced_by={node_id})
-                (
-                    produced_artifacts,
-                    _produced_count,
-                ) = await orchestrator.store.query_artifacts(
-                    produced_filter, limit=100, offset=0, embed_meta=False
-                )
-
-                for artifact in produced_artifacts:
-                    messages.append({
-                        "id": str(artifact.id),
-                        "type": artifact.type,
-                        "direction": "published",
-                        "payload": artifact.payload,
-                        "timestamp": artifact.created_at.isoformat(),
-                        "correlation_id": str(artifact.correlation_id)
-                        if artifact.correlation_id
-                        else None,
-                        "produced_by": artifact.produced_by,
-                    })
-
-                # 2. Get messages CONSUMED by this node
-                # Query all artifacts with consumption metadata
-                all_artifacts_filter = FilterConfig()  # No filter = all artifacts
-                all_envelopes, _ = await orchestrator.store.query_artifacts(
-                    all_artifacts_filter, limit=500, offset=0, embed_meta=True
-                )
-
-                for envelope in all_envelopes:
-                    artifact = envelope.artifact
-                    for consumption in envelope.consumptions:
-                        if consumption.consumer == node_id:
-                            messages.append({
-                                "id": str(artifact.id),
-                                "type": artifact.type,
-                                "direction": "consumed",
-                                "payload": artifact.payload,
-                                "timestamp": artifact.created_at.isoformat(),
-                                "correlation_id": str(artifact.correlation_id)
-                                if artifact.correlation_id
-                                else None,
-                                "produced_by": artifact.produced_by,
-                                "consumed_at": consumption.consumed_at.isoformat(),
-                            })
-
-                # Sort by timestamp (most recent first)
-                messages.sort(
-                    key=lambda m: m.get("consumed_at", m["timestamp"]), reverse=True
-                )
-
-                return {
-                    "node_id": node_id,
-                    "messages": messages,
-                    "total": len(messages),
-                }
-
-            except Exception as e:
-                logger.exception(f"Failed to get message history for {node_id}: {e}")
-                raise HTTPException(
-                    status_code=500, detail=f"Failed to get message history: {e!s}"
-                )
-
-        @app.get("/api/agents/{agent_id}/runs")
-        async def get_agent_runs(agent_id: str) -> dict[str, Any]:
-            """Get run history for an agent.
-
-            Phase 4.1 Feature Gap Fix: Returns agent execution history with metrics
-            for display in RunStatusTab.
-
-            Args:
-                agent_id: ID of the agent
-
-            Returns:
-                {
-                    "agent_id": "agent_name",
-                    "runs": [
-                        {
-                            "run_id": "uuid",
-                            "start_time": "2025-10-11T...",
-                            "end_time": "2025-10-11T...",
-                            "duration_ms": 1234,
-                            "status": "completed"|"active"|"error",
-                            "metrics": {
-                                "tokens_used": 123,
-                                "cost_usd": 0.0012,
-                                "artifacts_produced": 5
-                            },
-                            "error_message": "error details" (if status=error)
-                        },
-                        ...
-                    ],
-                    "total": 50
-                }
-            """
-            try:
-                # TODO: Implement run history tracking in orchestrator
-                # For now, return empty array with proper structure
-                # This unblocks frontend development and can be enhanced later
-
-                runs = []
-
-                # FUTURE: Query run history from orchestrator or store
-                # Example implementation when run tracking is added:
-                # runs = await orchestrator.get_agent_run_history(agent_id, limit=50)
-
-                return {"agent_id": agent_id, "runs": runs, "total": len(runs)}
-
-            except Exception as e:
-                logger.exception(f"Failed to get run history for {agent_id}: {e}")
-                raise HTTPException(
-                    status_code=500, detail=f"Failed to get run history: {e!s}"
-                )
-
-    def _register_theme_routes(self) -> None:
-        """Register theme API endpoints for dashboard customization."""
-        from pathlib import Path
-
-        import toml
-
-        app = self.app
-        themes_dir = Path(__file__).parent.parent / "themes"
-
-        @app.get("/api/themes")
-        async def list_themes() -> dict[str, Any]:
-            """Get list of available theme names.
-
-            Returns:
-                {"themes": ["dracula", "nord", ...]}
-            """
-            try:
-                if not themes_dir.exists():
-                    return {"themes": []}
-
-                theme_files = list(themes_dir.glob("*.toml"))
-                theme_names = sorted([f.stem for f in theme_files])
-
-                return {"themes": theme_names}
-            except Exception as e:
-                logger.exception(f"Failed to list themes: {e}")
-                raise HTTPException(
-                    status_code=500, detail=f"Failed to list themes: {e!s}"
-                )
-
-        @app.get("/api/themes/{theme_name}")
-        async def get_theme(theme_name: str) -> dict[str, Any]:
-            """Get theme data by name.
-
-            Args:
-                theme_name: Name of theme (without .toml extension)
-
-            Returns:
-                {
-                    "name": "dracula",
-                    "data": {
-                        "colors": {...}
-                    }
-                }
-            """
-            try:
-                # Sanitize theme name to prevent path traversal
-                theme_name = (
-                    theme_name.replace("/", "").replace("\\", "").replace("..", "")
-                )
-
-                theme_path = themes_dir / f"{theme_name}.toml"
-
-                if not theme_path.exists():
-                    raise HTTPException(
-                        status_code=404, detail=f"Theme '{theme_name}' not found"
-                    )
-
-                # Load TOML theme
-                theme_data = toml.load(theme_path)
+        # Register trace routes (traces, services, stats, query, streaming, history)
+        register_trace_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
+        )
 
-                return {"name": theme_name, "data": theme_data}
-            except HTTPException:
-                raise
-            except Exception as e:
-                logger.exception(f"Failed to load theme '{theme_name}': {e}")
-                raise HTTPException(
-                    status_code=500, detail=f"Failed to load theme: {e!s}"
-                )
+        # Register theme routes (list, get)
+        register_theme_routes(app=self.app)
+
+        # Register WebSocket endpoint and static files (must be last!)
+        register_websocket_routes(
+            app=self.app,
+            orchestrator=self.orchestrator,
+            websocket_manager=self.websocket_manager,
+            event_collector=self.event_collector,
+            graph_assembler=self.graph_assembler,
+            use_v2=self.use_v2,
+        )
 
     async def start(self) -> None:
         """Start the dashboard service.
@@ -1080,332 +158,4 @@ class DashboardHTTPService(BlackboardHTTPService):
         return self.app
 
 
-def _get_correlation_groups(
-    engine: "CorrelationEngine",  # noqa: F821
-    agent_name: str,
-    subscription_index: int,
-) -> list[dict[str, Any]]:
-    """Extract correlation group state from CorrelationEngine.
-
-    Returns waiting state for all correlation groups for the given agent subscription.
-    Used by enhanced /api/agents endpoint to expose JoinSpec waiting state.
-
-    Args:
-        engine: CorrelationEngine instance from orchestrator
-        agent_name: Name of the agent
-        subscription_index: Index of the subscription (for agents with multiple subscriptions)
-
-    Returns:
-        List of correlation group states with progress metrics:
-        [
-            {
-                "correlation_key": "patient_123",
-                "created_at": "2025-10-13T14:30:00Z",
-                "elapsed_seconds": 45.2,
-                "expires_in_seconds": 254.8,  # For time windows
-                "expires_in_artifacts": 7,  # For count windows
-                "collected_types": {"XRayImage": 1, "LabResults": 0},
-                "required_types": {"XRayImage": 1, "LabResults": 1},
-                "waiting_for": ["LabResults"],
-                "is_complete": False,
-                "is_expired": False
-            },
-            ...
-        ]
-    """
-
-    pool_key = (agent_name, subscription_index)
-    groups = engine.correlation_groups.get(pool_key, {})
-
-    if not groups:
-        return []
-
-    now = datetime.now(UTC)
-    result = []
-
-    for corr_key, group in groups.items():
-        # Calculate elapsed time
-        if group.created_at_time:
-            created_at_time = group.created_at_time
-            if created_at_time.tzinfo is None:
-                created_at_time = created_at_time.replace(tzinfo=UTC)
-            elapsed = (now - created_at_time).total_seconds()
-        else:
-            elapsed = 0
-
-        # Calculate time remaining (for time windows)
-        expires_in_seconds = None
-        if isinstance(group.window_spec, timedelta):
-            window_seconds = group.window_spec.total_seconds()
-            expires_in_seconds = max(0, window_seconds - elapsed)
-
-        # Calculate artifact count remaining (for count windows)
-        expires_in_artifacts = None
-        if isinstance(group.window_spec, int):
-            artifacts_passed = engine.global_sequence - group.created_at_sequence
-            expires_in_artifacts = max(0, group.window_spec - artifacts_passed)
-
-        # Determine what we're waiting for
-        collected_types = {
-            type_name: len(group.waiting_artifacts.get(type_name, []))
-            for type_name in group.required_types
-        }
-
-        waiting_for = [
-            type_name
-            for type_name, required_count in group.type_counts.items()
-            if collected_types.get(type_name, 0) < required_count
-        ]
-
-        result.append({
-            "correlation_key": str(corr_key),
-            "created_at": group.created_at_time.isoformat()
-            if group.created_at_time
-            else None,
-            "elapsed_seconds": round(elapsed, 1),
-            "expires_in_seconds": round(expires_in_seconds, 1)
-            if expires_in_seconds is not None
-            else None,
-            "expires_in_artifacts": expires_in_artifacts,
-            "collected_types": collected_types,
-            "required_types": dict(group.type_counts),
-            "waiting_for": waiting_for,
-            "is_complete": group.is_complete(),
-            "is_expired": group.is_expired(engine.global_sequence),
-        })
-
-    return result
-
-
-def _get_batch_state(
-    engine: "BatchEngine",  # noqa: F821
-    agent_name: str,
-    subscription_index: int,
-    batch_spec: "BatchSpec",  # noqa: F821
-) -> dict[str, Any] | None:
-    """Extract batch state from BatchEngine.
-
-    Returns current batch accumulator state for the given agent subscription.
-    Used by enhanced /api/agents endpoint to expose BatchSpec waiting state.
-
-    Args:
-        engine: BatchEngine instance from orchestrator
-        agent_name: Name of the agent
-        subscription_index: Index of the subscription
-        batch_spec: BatchSpec configuration (needed for metrics)
-
-    Returns:
-        Batch state dict or None if no batch or batch is empty:
-        {
-            "created_at": "2025-10-13T14:30:00Z",
-            "elapsed_seconds": 12.5,
-            "items_collected": 18,
-            "items_target": 25,
-            "items_remaining": 7,
-            "timeout_seconds": 30,
-            "timeout_remaining_seconds": 17.5,
-            "will_flush": "on_size" | "on_timeout" | "unknown"
-        }
-    """
-
-    batch_key = (agent_name, subscription_index)
-    accumulator = engine.batches.get(batch_key)
-
-    # Return None if no batch or batch is empty
-    if not accumulator or not accumulator.artifacts:
-        return None
-
-    now = datetime.now(UTC)
-    # Ensure accumulator.created_at is timezone-aware
-    created_at = accumulator.created_at
-    if created_at.tzinfo is None:
-        created_at = created_at.replace(tzinfo=UTC)
-    elapsed = (now - created_at).total_seconds()
-
-    # Calculate items collected (needed for all batch types)
-    items_collected = len(accumulator.artifacts)
-    # For group batching, use _group_count if available
-    if hasattr(accumulator, "_group_count"):
-        items_collected = accumulator._group_count
-
-    result = {
-        "created_at": accumulator.created_at.isoformat(),
-        "elapsed_seconds": round(elapsed, 1),
-        "items_collected": items_collected,  # Always include for all batch types
-    }
-
-    # Size-based metrics (only if size threshold configured)
-    if batch_spec.size:
-        result["items_target"] = batch_spec.size
-        result["items_remaining"] = max(0, batch_spec.size - items_collected)
-    else:
-        # Timeout-only batches: no target
-        result["items_target"] = None
-        result["items_remaining"] = None
-
-    # Timeout-based metrics
-    if batch_spec.timeout:
-        timeout_seconds = batch_spec.timeout.total_seconds()
-        timeout_remaining = max(0, timeout_seconds - elapsed)
-
-        result["timeout_seconds"] = int(timeout_seconds)
-        result["timeout_remaining_seconds"] = round(timeout_remaining, 1)
-
-    # Determine what will trigger flush
-    if batch_spec.size and batch_spec.timeout:
-        # Hybrid: predict which will fire first based on progress percentages
-        items_collected = result["items_collected"]
-        items_target = result.get("items_target", 1)
-        timeout_remaining = result.get("timeout_remaining_seconds", 0)
-
-        # Calculate progress toward each threshold
-        size_progress = items_collected / items_target if items_target > 0 else 0
-        timeout_elapsed = elapsed
-        timeout_total = batch_spec.timeout.total_seconds()
-        time_progress = timeout_elapsed / timeout_total if timeout_total > 0 else 0
-
-        # Predict based on which threshold we're progressing toward faster
-        # If we're closer to size threshold (percentage-wise), predict size
-        # Otherwise predict timeout
-        if size_progress > time_progress:
-            result["will_flush"] = "on_size"
-        else:
-            result["will_flush"] = "on_timeout"
-    elif batch_spec.size:
-        result["will_flush"] = "on_size"
-    elif batch_spec.timeout:
-        result["will_flush"] = "on_timeout"
-
-    return result
-
-
-def _compute_agent_status(agent: "Agent", orchestrator: "Flock") -> str:  # noqa: F821
-    """Determine agent status based on waiting state.
-
-    Checks if agent is waiting for correlation or batch completion.
-    Used by enhanced /api/agents endpoint to show agent status.
-
-    Args:
-        agent: Agent instance
-        orchestrator: Flock orchestrator instance
-
-    Returns:
-        "ready" - Agent not waiting for anything
-        "waiting" - Agent has correlation groups or batches accumulating
-        "active" - Agent currently executing (future enhancement)
-    """
-    # Check if any subscription is waiting for correlation or batching
-    for idx, subscription in enumerate(agent.subscriptions):
-        if subscription.join:
-            pool_key = (agent.name, idx)
-            if pool_key in orchestrator._correlation_engine.correlation_groups:
-                groups = orchestrator._correlation_engine.correlation_groups[pool_key]
-                if groups:  # Has waiting correlation groups
-                    return "waiting"
-
-        if subscription.batch:
-            batch_key = (agent.name, idx)
-            if batch_key in orchestrator._batch_engine.batches:
-                accumulator = orchestrator._batch_engine.batches[batch_key]
-                if accumulator and accumulator.artifacts:
-                    return "waiting"
-
-    return "ready"
-
-
-def _build_logic_config(  # noqa: F821
-    agent: "Agent",  # noqa: F821
-    subscription: "Subscription",  # noqa: F821
-    idx: int,
-    orchestrator: "Flock",
-) -> dict[str, Any] | None:
-    """Build logic operations configuration for a subscription.
-
-    Phase 1.2: Extracts JoinSpec and BatchSpec configuration plus current
-    waiting state for agents using logic operations.
-
-    Args:
-        agent: Agent instance
-        subscription: Subscription to analyze
-        idx: Subscription index (for agents with multiple subscriptions)
-        orchestrator: Flock orchestrator instance
-
-    Returns:
-        Logic operations config dict or None if no join/batch:
-        {
-            "subscription_index": 0,
-            "subscription_types": ["XRayImage", "LabResults"],
-            "join": {...},  # JoinSpec config (if present)
-            "batch": {...},  # BatchSpec config (if present)
-            "waiting_state": {...}  # Current state (if waiting)
-        }
-    """
-    if not subscription.join and not subscription.batch:
-        return None
-
-    config = {
-        "subscription_index": idx,
-        "subscription_types": list(subscription.type_names),
-    }
-
-    # JoinSpec configuration
-    if subscription.join:
-        join_spec = subscription.join
-        window_type = "time" if isinstance(join_spec.within, timedelta) else "count"
-        window_value = (
-            int(join_spec.within.total_seconds())
-            if isinstance(join_spec.within, timedelta)
-            else join_spec.within
-        )
-
-        config["join"] = {
-            "correlation_strategy": "by_key",
-            "window_type": window_type,
-            "window_value": window_value,
-            "window_unit": "seconds" if window_type == "time" else "artifacts",
-            "required_types": list(subscription.type_names),
-            "type_counts": dict(subscription.type_counts),
-        }
-
-        # Get waiting state from CorrelationEngine
-        correlation_groups = _get_correlation_groups(
-            orchestrator._correlation_engine, agent.name, idx
-        )
-        if correlation_groups:
-            config["waiting_state"] = {
-                "is_waiting": True,
-                "correlation_groups": correlation_groups,
-            }
-
-    # BatchSpec configuration
-    if subscription.batch:
-        batch_spec = subscription.batch
-        strategy = (
-            "hybrid"
-            if batch_spec.size and batch_spec.timeout
-            else "size"
-            if batch_spec.size
-            else "timeout"
-        )
-
-        config["batch"] = {
-            "strategy": strategy,
-        }
-        if batch_spec.size:
-            config["batch"]["size"] = batch_spec.size
-        if batch_spec.timeout:
-            config["batch"]["timeout_seconds"] = int(batch_spec.timeout.total_seconds())
-
-        # Get waiting state from BatchEngine
-        batch_state = _get_batch_state(
-            orchestrator._batch_engine, agent.name, idx, batch_spec
-        )
-        if batch_state:
-            if "waiting_state" not in config:
-                config["waiting_state"] = {"is_waiting": True}
-            config["waiting_state"]["batch_state"] = batch_state
-
-    return config
-
-
 __all__ = ["DashboardHTTPService"]
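
Net effect of the hunks above: roughly 1,300 lines of inline route handlers and module-level helpers leave `flock/dashboard/service.py`, replaced by four calls into the new `flock/dashboard/routes/` package (control, traces, themes, websocket). Each route module exposes a `register_*_routes` function that attaches its endpoints to the shared FastAPI app; the call signatures are visible in `_register_all_routes` above. A minimal sketch of that registration pattern, with a hypothetical `register_example_routes` and `/api/example` endpoint that are not part of flock-core:

```python
from typing import Any

from fastapi import FastAPI


def register_example_routes(app: FastAPI, orchestrator: Any) -> None:
    """Attach one focused group of endpoints to an existing FastAPI app.

    Mirrors the register_control_routes(...) calls above; the endpoint
    below is illustrative only, not flock-core API.
    """

    @app.get("/api/example")
    async def get_example() -> dict[str, Any]:
        # Handlers close over dependencies passed to the registration
        # function instead of reading them from the service instance.
        return {"agent_count": len(orchestrator.agents)}
```

Because every module receives the same app instance, the caller keeps control of registration order; that is why `register_websocket_routes`, which mounts the static frontend as a catch-all, must run last.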