flock-core 0.5.0b71__py3-none-any.whl → 0.5.1__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their public registries. It is provided for informational purposes only.
Potentially problematic release: this version of flock-core might be problematic.
- flock/agent.py +39 -1
- flock/artifacts.py +17 -10
- flock/cli.py +1 -1
- flock/dashboard/__init__.py +2 -0
- flock/dashboard/collector.py +282 -6
- flock/dashboard/events.py +6 -0
- flock/dashboard/graph_builder.py +563 -0
- flock/dashboard/launcher.py +11 -6
- flock/dashboard/models/__init__.py +1 -0
- flock/dashboard/models/graph.py +156 -0
- flock/dashboard/service.py +175 -14
- flock/dashboard/static_v2/assets/index-DFRnI_mt.js +111 -0
- flock/dashboard/static_v2/assets/index-fPLNdmp1.css +1 -0
- flock/dashboard/static_v2/index.html +13 -0
- flock/dashboard/websocket.py +2 -2
- flock/engines/dspy_engine.py +294 -20
- flock/frontend/README.md +6 -6
- flock/frontend/src/App.tsx +23 -31
- flock/frontend/src/__tests__/integration/graph-snapshot.test.tsx +647 -0
- flock/frontend/src/components/details/DetailWindowContainer.tsx +13 -17
- flock/frontend/src/components/details/MessageDetailWindow.tsx +439 -0
- flock/frontend/src/components/details/MessageHistoryTab.tsx +128 -53
- flock/frontend/src/components/details/RunStatusTab.tsx +79 -38
- flock/frontend/src/components/graph/AgentNode.test.tsx +3 -1
- flock/frontend/src/components/graph/AgentNode.tsx +8 -6
- flock/frontend/src/components/graph/GraphCanvas.tsx +13 -8
- flock/frontend/src/components/graph/MessageNode.test.tsx +3 -1
- flock/frontend/src/components/graph/MessageNode.tsx +16 -3
- flock/frontend/src/components/layout/DashboardLayout.tsx +12 -9
- flock/frontend/src/components/modules/HistoricalArtifactsModule.tsx +4 -14
- flock/frontend/src/components/modules/ModuleRegistry.ts +5 -3
- flock/frontend/src/hooks/useModules.ts +12 -4
- flock/frontend/src/hooks/usePersistence.ts +5 -3
- flock/frontend/src/services/api.ts +3 -19
- flock/frontend/src/services/graphService.test.ts +330 -0
- flock/frontend/src/services/graphService.ts +75 -0
- flock/frontend/src/services/websocket.ts +104 -268
- flock/frontend/src/store/filterStore.test.ts +89 -1
- flock/frontend/src/store/filterStore.ts +38 -16
- flock/frontend/src/store/graphStore.test.ts +538 -173
- flock/frontend/src/store/graphStore.ts +374 -465
- flock/frontend/src/store/moduleStore.ts +51 -33
- flock/frontend/src/store/uiStore.ts +23 -11
- flock/frontend/src/types/graph.ts +77 -44
- flock/frontend/src/utils/mockData.ts +16 -3
- flock/frontend/vite.config.ts +2 -2
- flock/orchestrator.py +27 -7
- flock/patches/__init__.py +5 -0
- flock/patches/dspy_streaming_patch.py +82 -0
- flock/service.py +2 -2
- flock/store.py +169 -4
- flock/themes/darkmatrix.toml +2 -2
- flock/themes/deep.toml +2 -2
- flock/themes/neopolitan.toml +4 -4
- {flock_core-0.5.0b71.dist-info → flock_core-0.5.1.dist-info}/METADATA +20 -13
- {flock_core-0.5.0b71.dist-info → flock_core-0.5.1.dist-info}/RECORD +59 -53
- flock/frontend/src/__tests__/e2e/critical-scenarios.test.tsx +0 -586
- flock/frontend/src/__tests__/integration/filtering-e2e.test.tsx +0 -391
- flock/frontend/src/__tests__/integration/graph-rendering.test.tsx +0 -640
- flock/frontend/src/services/websocket.test.ts +0 -595
- flock/frontend/src/utils/transforms.test.ts +0 -860
- flock/frontend/src/utils/transforms.ts +0 -323
- {flock_core-0.5.0b71.dist-info → flock_core-0.5.1.dist-info}/WHEEL +0 -0
- {flock_core-0.5.0b71.dist-info → flock_core-0.5.1.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.0b71.dist-info → flock_core-0.5.1.dist-info}/licenses/LICENSE +0 -0
flock/agent.py
CHANGED
@@ -80,6 +80,7 @@ class AgentOutput:
             partition_key=metadata.get("partition_key"),
             tags=metadata.get("tags"),
             version=metadata.get("version", 1),
+            artifact_id=metadata.get("artifact_id"),  # Phase 6: Preserve engine's ID
         )


@@ -301,12 +302,20 @@ class Agent(metaclass=AutoTracedMeta):

         produced: list[Artifact] = []
         for output_decl in self.outputs:
+            # Phase 6: Find the matching artifact from engine result to preserve its ID
+            matching_artifact = self._find_matching_artifact(output_decl, result)
+
             payload = self._select_payload(output_decl, result)
             if payload is None:
                 continue
             metadata = {
-                "correlation_id": ctx.correlation_id
+                "correlation_id": ctx.correlation_id,
             }
+
+            # Phase 6: Preserve artifact ID from engine (for streaming message preview)
+            if matching_artifact:
+                metadata["artifact_id"] = matching_artifact.id
+
             artifact = output_decl.apply(payload, produced_by=self.name, metadata=metadata)
             produced.append(artifact)
             await ctx.board.publish(artifact)
@@ -371,6 +380,35 @@ class Agent(metaclass=AutoTracedMeta):
         self.utilities = [default_component]
         return self.utilities

+    def _find_matching_artifact(
+        self, output_decl: AgentOutput, result: EvalResult
+    ) -> Artifact | None:
+        """Phase 6: Find artifact from engine result that matches this output declaration.
+
+        Returns the artifact object (with its ID) so we can preserve it when creating
+        the final published artifact. This ensures streaming events use the same ID.
+        """
+        from flock.registry import type_registry
+
+        if not result.artifacts:
+            return None
+
+        # Normalize the expected type name to canonical form
+        expected_canonical = type_registry.resolve_name(output_decl.spec.type_name)
+
+        for artifact in result.artifacts:
+            # Normalize artifact type name to canonical form for comparison
+            try:
+                artifact_canonical = type_registry.resolve_name(artifact.type)
+                if artifact_canonical == expected_canonical:
+                    return artifact
+            except Exception:
+                # If normalization fails, fall back to direct comparison
+                if artifact.type == output_decl.spec.type_name:
+                    return artifact
+
+        return None
+
     def _select_payload(
         self, output_decl: AgentOutput, result: EvalResult
     ) -> dict[str, Any] | None:
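The change threads one idea through the publish path: the artifact ID that the engine announced while streaming must survive into the artifact that is finally published. A minimal standalone sketch of the matching rule follows; resolve_name stands in for flock.registry.type_registry.resolve_name, and the artifact objects are hypothetical.

# Sketch of the _find_matching_artifact strategy: prefer canonical type-name
# comparison, and fall back to the raw string if a type fails to resolve.
def find_matching(artifacts, expected_type: str, resolve_name):
    expected = resolve_name(expected_type)
    for artifact in artifacts:
        try:
            if resolve_name(artifact.type) == expected:
                return artifact
        except Exception:
            # Unresolvable type name: compare the raw strings instead.
            if artifact.type == expected_type:
                return artifact
    return None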
flock/artifacts.py
CHANGED
@@ -51,18 +51,25 @@ class ArtifactSpec(BaseModel):
         partition_key: str | None = None,
         tags: set[str] | None = None,
         version: int = 1,
+        artifact_id: UUID | None = None,  # Phase 6: Optional pre-generated ID
     ) -> Artifact:
         payload_model = self.model(**data)
-        return Artifact(
-            type=self.type_name,
-            payload=payload_model.model_dump(),
-            produced_by=produced_by,
-            visibility=ensure_visibility(visibility),
-            correlation_id=correlation_id,
-            partition_key=partition_key,
-            tags=tags or set(),
-            version=version,
-        )
+        artifact_kwargs = {
+            "type": self.type_name,
+            "payload": payload_model.model_dump(),
+            "produced_by": produced_by,
+            "visibility": ensure_visibility(visibility),
+            "correlation_id": correlation_id,
+            "partition_key": partition_key,
+            "tags": tags or set(),
+            "version": version,
+        }
+
+        # Phase 6: Use pre-generated ID if provided (for streaming message preview)
+        if artifact_id is not None:
+            artifact_kwargs["id"] = artifact_id
+
+        return Artifact(**artifact_kwargs)


 class ArtifactEnvelope(BaseModel):
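A minimal sketch of the new kwargs-dict pattern, using a hypothetical stand-in Artifact model (the real one lives in flock.artifacts): the ID is injected only when a pre-generated one is supplied, so the model's default ID factory still applies otherwise.

from uuid import UUID, uuid4

from pydantic import BaseModel, Field


class Artifact(BaseModel):  # hypothetical stand-in for flock's Artifact
    id: UUID = Field(default_factory=uuid4)
    type: str
    payload: dict


def build(type_name: str, payload: dict, artifact_id: UUID | None = None) -> Artifact:
    kwargs: dict = {"type": type_name, "payload": payload}
    if artifact_id is not None:
        kwargs["id"] = artifact_id  # reuse the ID announced in streaming events
    return Artifact(**kwargs)


streamed_id = uuid4()  # the ID an engine would announce while streaming
assert build("demo.Report", {"x": 1}, streamed_id).id == streamed_id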
flock/cli.py
CHANGED
@@ -63,7 +63,7 @@ def list_agents() -> None:
 @app.command()
 def serve(
     host: str = "127.0.0.1",
-    port: int =
+    port: int = 8344,
     sqlite_db: str | None = typer.Option(None, help="Path to SQLite blackboard store"),
 ) -> None:
     """Run the HTTP control plane bound to the demo orchestrator."""
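For context, a self-contained sketch of the resulting typer command (not the real module; flock's serve wires up an orchestrator, elided here):

from typing import Optional

import typer

app = typer.Typer()


@app.command()
def serve(
    host: str = "127.0.0.1",
    port: int = 8344,  # new default in 0.5.1
    sqlite_db: Optional[str] = typer.Option(None, help="Path to SQLite blackboard store"),
) -> None:
    """Run the HTTP control plane bound to the demo orchestrator."""
    typer.echo(f"serving on {host}:{port} (sqlite_db={sqlite_db})")


if __name__ == "__main__":
    app()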
flock/dashboard/__init__.py
CHANGED
@@ -12,6 +12,7 @@ from flock.dashboard.events import (
     MessagePublishedEvent,
     StreamingOutputEvent,
 )
+from flock.dashboard.graph_builder import GraphAssembler
 from flock.dashboard.service import DashboardHTTPService
 from flock.dashboard.websocket import WebSocketManager

@@ -22,6 +23,7 @@ __all__ = [
     "AgentErrorEvent",
     "DashboardEventCollector",
     "DashboardHTTPService",
+    "GraphAssembler",
     "MessagePublishedEvent",
     "StreamingOutputEvent",
     "WebSocketManager",
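With the new export, the assembler is importable from the package root alongside the collector:

from flock.dashboard import DashboardEventCollector, GraphAssembler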
flock/dashboard/collector.py
CHANGED
@@ -5,10 +5,14 @@ Phase 1: Events stored in in-memory buffer (max 100 events).
 Phase 3: Extended to emit via WebSocket using WebSocketManager.
 """

+import asyncio
+import hashlib
+import json
 import traceback
-from collections import deque
+from collections import defaultdict, deque
+from dataclasses import dataclass, field
 from datetime import datetime, timezone
-from typing import TYPE_CHECKING, Optional
+from typing import TYPE_CHECKING, Any, Optional

 from pydantic import PrivateAttr

@@ -21,8 +25,10 @@ from flock.dashboard.events import (
     SubscriptionInfo,
     VisibilitySpec,
 )
+from flock.dashboard.models.graph import GraphRun, GraphState
 from flock.logging.logging import get_logger
 from flock.runtime import Context
+from flock.store import AgentSnapshotRecord, BlackboardStore


 logger = get_logger("dashboard.collector")
@@ -33,6 +39,49 @@ if TYPE_CHECKING:  # pragma: no cover - type hints only
     from flock.dashboard.websocket import WebSocketManager


+@dataclass(slots=True)
+class RunRecord:
+    run_id: str
+    agent_name: str
+    correlation_id: str = ""
+    status: str = "active"
+    consumed_artifacts: list[str] = field(default_factory=list)
+    produced_artifacts: list[str] = field(default_factory=list)
+    duration_ms: float | None = None
+    started_at: datetime | None = None
+    completed_at: datetime | None = None
+    metrics: dict[str, Any] = field(default_factory=dict)
+    error_message: str | None = None
+
+    def to_graph_run(self) -> GraphRun:
+        status = self.status if self.status in {"active", "completed", "error"} else "active"
+        return GraphRun(
+            run_id=self.run_id,
+            agent_name=self.agent_name,
+            correlation_id=self.correlation_id or None,
+            status=status,  # type: ignore[arg-type]
+            consumed_artifacts=list(self.consumed_artifacts),
+            produced_artifacts=list(self.produced_artifacts),
+            duration_ms=self.duration_ms,
+            started_at=self.started_at,
+            completed_at=self.completed_at,
+            metrics=dict(self.metrics),
+            error_message=self.error_message,
+        )
+
+
+@dataclass(slots=True)
+class AgentSnapshot:
+    name: str
+    description: str
+    subscriptions: list[str]
+    output_types: list[str]
+    labels: list[str]
+    first_seen: datetime
+    last_seen: datetime
+    signature: str
+
+
 class DashboardEventCollector(AgentComponent):
     """Collects agent lifecycle events for dashboard visualization.

@@ -57,12 +106,26 @@ class DashboardEventCollector(AgentComponent):
     # WebSocketManager for broadcasting events
     _websocket_manager: Optional["WebSocketManager"] = PrivateAttr(default=None)

-    def __init__(self, **data):
+    # Graph assembly helpers
+    _graph_lock: asyncio.Lock = PrivateAttr(default_factory=asyncio.Lock)
+    _run_registry: dict[str, RunRecord] = PrivateAttr(default_factory=dict)
+    _artifact_consumers: dict[str, set[str]] = PrivateAttr(default_factory=lambda: defaultdict(set))
+    _agent_status: dict[str, str] = PrivateAttr(default_factory=dict)
+    _agent_snapshots: dict[str, AgentSnapshot] = PrivateAttr(default_factory=dict)
+
+    def __init__(self, *, store: BlackboardStore | None = None, **data):
         super().__init__(**data)
         # In-memory buffer with max 100 events (LRU eviction)
         self._events = deque(maxlen=100)
         self._run_start_times = {}
         self._websocket_manager = None
+        self._graph_lock = asyncio.Lock()
+        self._run_registry = {}
+        self._artifact_consumers = defaultdict(set)
+        self._agent_status = {}
+        self._store: BlackboardStore | None = store
+        self._persistent_loaded = False
+        self._agent_snapshots = {}

     def set_websocket_manager(self, manager: "WebSocketManager") -> None:
         """Set WebSocketManager for broadcasting events.
@@ -100,6 +163,22 @@ class DashboardEventCollector(AgentComponent):
         # Extract produced types from agent outputs
         produced_types = [output.spec.type_name for output in agent.outputs]

+        correlation_id = str(ctx.correlation_id) if ctx.correlation_id else ""
+        async with self._graph_lock:
+            run = self._ensure_run_record(
+                run_id=ctx.task_id,
+                agent_name=agent.name,
+                correlation_id=correlation_id,
+                ensure_started=True,
+            )
+            run.status = "active"
+            for artifact_id in consumed_artifacts:
+                if artifact_id not in run.consumed_artifacts:
+                    run.consumed_artifacts.append(artifact_id)
+                self._artifact_consumers[artifact_id].add(agent.name)
+            self._agent_status[agent.name] = "running"
+            await self._update_agent_snapshot_locked(agent)
+
         # Build subscription info from agent's subscriptions
         subscription_info = SubscriptionInfo(from_agents=[], channels=[], mode="both")

@@ -112,7 +191,7 @@ class DashboardEventCollector(AgentComponent):

         # Create and store event
         event = AgentActivatedEvent(
-            correlation_id=
+            correlation_id=correlation_id,
             agent_name=agent.name,
             agent_id=agent.name,
             run_id=ctx.task_id,  # Unique ID for this agent run
@@ -146,10 +225,24 @@ class DashboardEventCollector(AgentComponent):
         """
         # Convert visibility to VisibilitySpec
         visibility_spec = self._convert_visibility(artifact.visibility)
+        correlation_id = str(ctx.correlation_id) if ctx.correlation_id else ""
+        artifact_id = str(artifact.id)
+
+        async with self._graph_lock:
+            run = self._ensure_run_record(
+                run_id=ctx.task_id,
+                agent_name=agent.name,
+                correlation_id=correlation_id,
+                ensure_started=True,
+            )
+            run.status = "active"
+            if artifact_id not in run.produced_artifacts:
+                run.produced_artifacts.append(artifact_id)
+            await self._update_agent_snapshot_locked(agent)

         # Create and store event
         event = MessagePublishedEvent(
-            correlation_id=
+            correlation_id=correlation_id,
             artifact_id=str(artifact.id),
             artifact_type=artifact.type,
             produced_by=artifact.produced_by,
@@ -210,6 +303,24 @@ class DashboardEventCollector(AgentComponent):

         self._events.append(event)

+        async with self._graph_lock:
+            correlation_id = str(ctx.correlation_id) if ctx.correlation_id else ""
+            run = self._ensure_run_record(
+                run_id=ctx.task_id,
+                agent_name=agent.name,
+                correlation_id=correlation_id,
+                ensure_started=True,
+            )
+            run.status = "completed"
+            run.duration_ms = duration_ms
+            run.metrics = dict(metrics)
+            run.completed_at = datetime.now(timezone.utc)
+            for artifact_id in artifacts_produced:
+                if artifact_id not in run.produced_artifacts:
+                    run.produced_artifacts.append(artifact_id)
+            self._agent_status[agent.name] = "idle"
+            await self._update_agent_snapshot_locked(agent)
+
         # Broadcast via WebSocket if manager is configured
         if self._websocket_manager:
             await self._websocket_manager.broadcast(event)
@@ -248,10 +359,175 @@ class DashboardEventCollector(AgentComponent):

         self._events.append(event)

+        async with self._graph_lock:
+            correlation_id = str(ctx.correlation_id) if ctx.correlation_id else ""
+            run = self._ensure_run_record(
+                run_id=ctx.task_id,
+                agent_name=agent.name,
+                correlation_id=correlation_id,
+                ensure_started=True,
+            )
+            run.status = "error"
+            run.error_message = error_message
+            run.completed_at = datetime.now(timezone.utc)
+            self._agent_status[agent.name] = "error"
+            await self._update_agent_snapshot_locked(agent)
+
         # Broadcast via WebSocket if manager is configured
         if self._websocket_manager:
             await self._websocket_manager.broadcast(event)

+    async def snapshot_graph_state(self) -> GraphState:
+        """Return a thread-safe snapshot of runs, consumptions, and agent status."""
+        async with self._graph_lock:
+            consumptions = {
+                artifact_id: sorted(consumers)
+                for artifact_id, consumers in self._artifact_consumers.items()
+            }
+            runs = [record.to_graph_run() for record in self._run_registry.values()]
+            agent_status = dict(self._agent_status)
+        return GraphState(consumptions=consumptions, runs=runs, agent_status=agent_status)
+
+    async def snapshot_agent_registry(self) -> dict[str, AgentSnapshot]:
+        """Return a snapshot of all known agents (active and inactive)."""
+        await self.load_persistent_snapshots()
+        async with self._graph_lock:
+            return {
+                name: self._clone_snapshot(snapshot)
+                for name, snapshot in self._agent_snapshots.items()
+            }
+
+    async def load_persistent_snapshots(self) -> None:
+        if self._store is None or self._persistent_loaded:
+            return
+        records = await self._store.load_agent_snapshots()
+        async with self._graph_lock:
+            for record in records:
+                self._agent_snapshots[record.agent_name] = AgentSnapshot(
+                    name=record.agent_name,
+                    description=record.description,
+                    subscriptions=list(record.subscriptions),
+                    output_types=list(record.output_types),
+                    labels=list(record.labels),
+                    first_seen=record.first_seen,
+                    last_seen=record.last_seen,
+                    signature=record.signature,
+                )
+            self._persistent_loaded = True
+
+    async def clear_agent_registry(self) -> None:
+        """Clear cached agent metadata (for explicit resets)."""
+        async with self._graph_lock:
+            self._agent_snapshots.clear()
+            if self._store is not None:
+                await self._store.clear_agent_snapshots()
+
+    def _ensure_run_record(
+        self,
+        *,
+        run_id: str,
+        agent_name: str,
+        correlation_id: str,
+        ensure_started: bool = False,
+    ) -> RunRecord:
+        """Internal helper. Caller must hold _graph_lock."""
+        run = self._run_registry.get(run_id)
+        if not run:
+            run = RunRecord(
+                run_id=run_id,
+                agent_name=agent_name,
+                correlation_id=correlation_id,
+                started_at=datetime.now(timezone.utc) if ensure_started else None,
+            )
+            self._run_registry[run_id] = run
+        else:
+            run.agent_name = agent_name
+            if correlation_id:
+                run.correlation_id = correlation_id
+            if ensure_started and run.started_at is None:
+                run.started_at = datetime.now(timezone.utc)
+        return run
+
+    async def _update_agent_snapshot_locked(self, agent: "Agent") -> None:
+        now = datetime.now(timezone.utc)
+        description = agent.description or ""
+        subscriptions = sorted(
+            {
+                type_name
+                for subscription in getattr(agent, "subscriptions", [])
+                for type_name in getattr(subscription, "type_names", [])
+            }
+        )
+        output_types = sorted(
+            {
+                output.spec.type_name
+                for output in getattr(agent, "outputs", [])
+                if getattr(output, "spec", None) is not None
+                and getattr(output.spec, "type_name", "")
+            }
+        )
+        labels = sorted(agent.labels)
+
+        signature_payload = {
+            "description": description,
+            "subscriptions": subscriptions,
+            "output_types": output_types,
+            "labels": labels,
+        }
+        signature = hashlib.sha256(
+            json.dumps(signature_payload, sort_keys=True).encode("utf-8")
+        ).hexdigest()
+
+        snapshot = self._agent_snapshots.get(agent.name)
+        if snapshot is None:
+            snapshot = AgentSnapshot(
+                name=agent.name,
+                description=description,
+                subscriptions=subscriptions,
+                output_types=output_types,
+                labels=labels,
+                first_seen=now,
+                last_seen=now,
+                signature=signature,
+            )
+            self._agent_snapshots[agent.name] = snapshot
+        else:
+            snapshot.description = description
+            snapshot.subscriptions = subscriptions
+            snapshot.output_types = output_types
+            snapshot.labels = labels
+            snapshot.last_seen = now
+            snapshot.signature = signature
+
+        if self._store is not None:
+            record = self._snapshot_to_record(snapshot)
+            await self._store.upsert_agent_snapshot(record)
+
+    @staticmethod
+    def _clone_snapshot(snapshot: AgentSnapshot) -> AgentSnapshot:
+        return AgentSnapshot(
+            name=snapshot.name,
+            description=snapshot.description,
+            subscriptions=list(snapshot.subscriptions),
+            output_types=list(snapshot.output_types),
+            labels=list(snapshot.labels),
+            first_seen=snapshot.first_seen,
+            last_seen=snapshot.last_seen,
+            signature=snapshot.signature,
+        )
+
+    def _snapshot_to_record(self, snapshot: AgentSnapshot) -> AgentSnapshotRecord:
+        return AgentSnapshotRecord(
+            agent_name=snapshot.name,
+            description=snapshot.description,
+            subscriptions=list(snapshot.subscriptions),
+            output_types=list(snapshot.output_types),
+            labels=list(snapshot.labels),
+            first_seen=snapshot.first_seen,
+            last_seen=snapshot.last_seen,
+            signature=snapshot.signature,
+        )
+
     def _convert_visibility(self, visibility) -> VisibilitySpec:
         """Convert flock.visibility.Visibility to VisibilitySpec.

@@ -280,4 +556,4 @@ class DashboardEventCollector(AgentComponent):
         return spec


-__all__ = ["DashboardEventCollector"]
+__all__ = ["AgentSnapshot", "DashboardEventCollector"]
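The snapshot signature is a SHA-256 over a canonically serialized metadata payload, so any change to an agent's description, subscriptions, output types, or labels yields a new signature. A standalone sketch of that scheme (the function name is illustrative):

import hashlib
import json


def agent_signature(
    description: str,
    subscriptions: list[str],
    output_types: list[str],
    labels: list[str],
) -> str:
    # sort_keys plus pre-sorted lists make the encoding canonical, so the
    # hash is stable across runs and dict orderings.
    payload = {
        "description": description,
        "subscriptions": sorted(subscriptions),
        "output_types": sorted(output_types),
        "labels": sorted(labels),
    }
    return hashlib.sha256(
        json.dumps(payload, sort_keys=True).encode("utf-8")
    ).hexdigest()


print(agent_signature("demo agent", ["Task"], ["Report"], []))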
flock/dashboard/events.py
CHANGED
@@ -102,6 +102,8 @@ class StreamingOutputEvent(BaseModel):

     For Phase 1: This is optional and not fully implemented.
     Schema per DATA_MODEL.md lines 152-159.
+
+    Phase 6 Extension: Added artifact_id for message node streaming in blackboard view.
     """

     # Event metadata
@@ -120,6 +122,10 @@ class StreamingOutputEvent(BaseModel):
     sequence: int  # Monotonic sequence for ordering
     is_final: bool = False  # True when agent completes this output stream

+    # Artifact tracking (Phase 6: for message streaming preview)
+    artifact_id: str | None = None  # Pre-generated artifact ID for streaming message nodes
+    artifact_type: str | None = None  # Artifact type name (e.g., "__main__.BookOutline")
+

 class AgentCompletedEvent(BaseModel):
     """Event emitted when agent execution finishes successfully.