flock-core 0.5.2-py3-none-any.whl → 0.5.4-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of flock-core might be problematic.
- flock/agent.py +16 -3
- flock/artifact_collector.py +158 -0
- flock/batch_accumulator.py +252 -0
- flock/correlation_engine.py +223 -0
- flock/dashboard/collector.py +4 -0
- flock/dashboard/events.py +74 -0
- flock/dashboard/graph_builder.py +272 -0
- flock/dashboard/models/graph.py +3 -1
- flock/dashboard/service.py +363 -14
- flock/frontend/package.json +1 -1
- flock/frontend/src/components/controls/PublishControl.test.tsx +11 -11
- flock/frontend/src/components/controls/PublishControl.tsx +1 -1
- flock/frontend/src/components/graph/AgentNode.tsx +4 -0
- flock/frontend/src/components/graph/GraphCanvas.tsx +4 -0
- flock/frontend/src/components/graph/LogicOperationsDisplay.tsx +463 -0
- flock/frontend/src/components/graph/PendingBatchEdge.tsx +141 -0
- flock/frontend/src/components/graph/PendingJoinEdge.tsx +144 -0
- flock/frontend/src/services/graphService.ts +3 -1
- flock/frontend/src/services/websocket.ts +99 -1
- flock/frontend/src/store/graphStore.test.ts +2 -1
- flock/frontend/src/store/graphStore.ts +36 -5
- flock/frontend/src/types/graph.ts +86 -0
- flock/orchestrator.py +263 -3
- flock/patches/__init__.py +1 -0
- flock/patches/dspy_streaming_patch.py +1 -0
- flock/subscription.py +70 -7
- {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/METADATA +70 -14
- {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/RECORD +31 -25
- {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/WHEEL +0 -0
- {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/licenses/LICENSE +0 -0
flock/dashboard/service.py
CHANGED

@@ -8,6 +8,7 @@ Provides real-time dashboard capabilities by:
 """
 
 import os
+from datetime import datetime, timedelta, timezone
 from importlib.metadata import PackageNotFoundError, version
 from pathlib import Path
 from typing import Any
@@ -201,7 +202,10 @@ class DashboardHTTPService(BlackboardHTTPService):
 
         @app.get("/api/agents")
         async def get_agents() -> dict[str, Any]:
-            """Get all registered agents.
+            """Get all registered agents with logic operations state.
+
+            Phase 1.2 Enhancement: Now includes logic_operations configuration
+            and waiting state for agents using JoinSpec or BatchSpec.
 
             Returns:
                 {
@@ -209,9 +213,18 @@ class DashboardHTTPService(BlackboardHTTPService):
                     {
                         "name": "agent_name",
                         "description": "...",
-                        "status": "ready",
+                        "status": "ready" | "waiting" | "active",
                         "subscriptions": ["TypeA", "TypeB"],
-                        "output_types": ["TypeC", "TypeD"]
+                        "output_types": ["TypeC", "TypeD"],
+                        "logic_operations": [  # NEW: Phase 1.2
+                            {
+                                "subscription_index": 0,
+                                "subscription_types": ["TypeA", "TypeB"],
+                                "join": {...},   # JoinSpec config
+                                "batch": {...},  # BatchSpec config
+                                "waiting_state": {...}  # Current state
+                            }
+                        ]
                     },
                     ...
                 ]
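For concreteness, a response from the enhanced endpoint might look roughly like the sketch below. This is an illustrative payload only: the field names follow the docstring above, while the agent name and values are hypothetical.

# Hypothetical GET /api/agents payload for a single waiting agent (illustrative values only)
example_response = {
    "agents": [
        {
            "name": "diagnosis_agent",  # hypothetical agent
            "description": "Correlates imaging and lab artifacts",
            "status": "waiting",
            "subscriptions": ["XRayImage", "LabResults"],
            "output_types": ["Diagnosis"],
            "logic_operations": [
                {
                    "subscription_index": 0,
                    "subscription_types": ["XRayImage", "LabResults"],
                    "join": {"window_type": "time", "window_value": 300, "window_unit": "seconds"},
                    "waiting_state": {"is_waiting": True, "correlation_groups": []},
                },
            ],
        },
    ],
}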
@@ -228,15 +241,25 @@ class DashboardHTTPService(BlackboardHTTPService):
                 # Extract produced types from agent outputs
                 produced_types = [output.spec.type_name for output in agent.outputs]
 
-                [old lines 231-239 removed; content not rendered in this view]
+                # NEW Phase 1.2: Logic operations configuration
+                logic_operations = []
+                for idx, subscription in enumerate(agent.subscriptions):
+                    logic_config = _build_logic_config(agent, subscription, idx, orchestrator)
+                    if logic_config:  # Only include if has join/batch
+                        logic_operations.append(logic_config)
+
+                agent_data = {
+                    "name": agent.name,
+                    "description": agent.description or "",
+                    "status": _compute_agent_status(agent, orchestrator),  # NEW: Dynamic status
+                    "subscriptions": consumed_types,
+                    "output_types": produced_types,
+                }
+
+                if logic_operations:
+                    agent_data["logic_operations"] = logic_operations
+
+                agents.append(agent_data)
 
             return {"agents": agents}
 
@@ -693,10 +716,10 @@ class DashboardHTTPService(BlackboardHTTPService):
             if time_range and time_range[0]:
                 # Convert nanoseconds to datetime
                 oldest_trace = datetime.fromtimestamp(
-                    time_range[0] / 1_000_000_000
+                    time_range[0] / 1_000_000_000, tz=timezone.utc
                 ).isoformat()
                 newest_trace = datetime.fromtimestamp(
-                    time_range[1] / 1_000_000_000
+                    time_range[1] / 1_000_000_000, tz=timezone.utc
                 ).isoformat()
 
                 # Get file size
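The tz=timezone.utc change above pins trace timestamps to UTC rather than the server's local zone. A minimal sketch of the difference, using an arbitrary nanosecond epoch value:

from datetime import datetime, timezone

trace_ts_ns = 1_760_000_000_000_000_000  # arbitrary nanosecond timestamp, for illustration only

naive = datetime.fromtimestamp(trace_ts_ns / 1_000_000_000)                   # local time, no tzinfo
aware = datetime.fromtimestamp(trace_ts_ns / 1_000_000_000, tz=timezone.utc)  # UTC, offset-aware

print(naive.isoformat())  # depends on the server's timezone, no offset suffix
print(aware.isoformat())  # "2025-10-09T08:53:20+00:00" regardless of server timezone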
@@ -988,4 +1011,330 @@ class DashboardHTTPService(BlackboardHTTPService):
         return self.app
 
 
+def _get_correlation_groups(
+    engine: "CorrelationEngine",  # noqa: F821
+    agent_name: str,
+    subscription_index: int,
+) -> list[dict[str, Any]]:
+    """Extract correlation group state from CorrelationEngine.
+
+    Returns waiting state for all correlation groups for the given agent subscription.
+    Used by enhanced /api/agents endpoint to expose JoinSpec waiting state.
+
+    Args:
+        engine: CorrelationEngine instance from orchestrator
+        agent_name: Name of the agent
+        subscription_index: Index of the subscription (for agents with multiple subscriptions)
+
+    Returns:
+        List of correlation group states with progress metrics:
+        [
+            {
+                "correlation_key": "patient_123",
+                "created_at": "2025-10-13T14:30:00Z",
+                "elapsed_seconds": 45.2,
+                "expires_in_seconds": 254.8,   # For time windows
+                "expires_in_artifacts": 7,     # For count windows
+                "collected_types": {"XRayImage": 1, "LabResults": 0},
+                "required_types": {"XRayImage": 1, "LabResults": 1},
+                "waiting_for": ["LabResults"],
+                "is_complete": False,
+                "is_expired": False
+            },
+            ...
+        ]
+    """
+
+    pool_key = (agent_name, subscription_index)
+    groups = engine.correlation_groups.get(pool_key, {})
+
+    if not groups:
+        return []
+
+    now = datetime.now(timezone.utc)
+    result = []
+
+    for corr_key, group in groups.items():
+        # Calculate elapsed time
+        if group.created_at_time:
+            created_at_time = group.created_at_time
+            if created_at_time.tzinfo is None:
+                created_at_time = created_at_time.replace(tzinfo=timezone.utc)
+            elapsed = (now - created_at_time).total_seconds()
+        else:
+            elapsed = 0
+
+        # Calculate time remaining (for time windows)
+        expires_in_seconds = None
+        if isinstance(group.window_spec, timedelta):
+            window_seconds = group.window_spec.total_seconds()
+            expires_in_seconds = max(0, window_seconds - elapsed)
+
+        # Calculate artifact count remaining (for count windows)
+        expires_in_artifacts = None
+        if isinstance(group.window_spec, int):
+            artifacts_passed = engine.global_sequence - group.created_at_sequence
+            expires_in_artifacts = max(0, group.window_spec - artifacts_passed)
+
+        # Determine what we're waiting for
+        collected_types = {
+            type_name: len(group.waiting_artifacts.get(type_name, []))
+            for type_name in group.required_types
+        }
+
+        waiting_for = [
+            type_name
+            for type_name, required_count in group.type_counts.items()
+            if collected_types.get(type_name, 0) < required_count
+        ]
+
+        result.append(
+            {
+                "correlation_key": str(corr_key),
+                "created_at": group.created_at_time.isoformat() if group.created_at_time else None,
+                "elapsed_seconds": round(elapsed, 1),
+                "expires_in_seconds": round(expires_in_seconds, 1)
+                if expires_in_seconds is not None
+                else None,
+                "expires_in_artifacts": expires_in_artifacts,
+                "collected_types": collected_types,
+                "required_types": dict(group.type_counts),
+                "waiting_for": waiting_for,
+                "is_complete": group.is_complete(),
+                "is_expired": group.is_expired(engine.global_sequence),
+            }
+        )
+
+    return result
+
+
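A minimal standalone sketch of the window arithmetic used above, assuming (as the code does) that a time window arrives as a timedelta and a count window as an int; the numbers are the same hypothetical ones as in the docstring:

from datetime import timedelta

# Time window: a 5-minute window opened 45.2 seconds ago
window_spec = timedelta(minutes=5)
elapsed = 45.2
expires_in_seconds = max(0, window_spec.total_seconds() - elapsed)   # 254.8

# Count window: 10 artifacts allowed, 3 global artifacts seen since the group opened
window_size = 10
artifacts_passed = 3
expires_in_artifacts = max(0, window_size - artifacts_passed)        # 7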
+def _get_batch_state(
+    engine: "BatchEngine",  # noqa: F821
+    agent_name: str,
+    subscription_index: int,
+    batch_spec: "BatchSpec",  # noqa: F821
+) -> dict[str, Any] | None:
+    """Extract batch state from BatchEngine.
+
+    Returns current batch accumulator state for the given agent subscription.
+    Used by enhanced /api/agents endpoint to expose BatchSpec waiting state.
+
+    Args:
+        engine: BatchEngine instance from orchestrator
+        agent_name: Name of the agent
+        subscription_index: Index of the subscription
+        batch_spec: BatchSpec configuration (needed for metrics)
+
+    Returns:
+        Batch state dict or None if no batch or batch is empty:
+        {
+            "created_at": "2025-10-13T14:30:00Z",
+            "elapsed_seconds": 12.5,
+            "items_collected": 18,
+            "items_target": 25,
+            "items_remaining": 7,
+            "timeout_seconds": 30,
+            "timeout_remaining_seconds": 17.5,
+            "will_flush": "on_size" | "on_timeout" | "unknown"
+        }
+    """
+
+    batch_key = (agent_name, subscription_index)
+    accumulator = engine.batches.get(batch_key)
+
+    # Return None if no batch or batch is empty
+    if not accumulator or not accumulator.artifacts:
+        return None
+
+    now = datetime.now(timezone.utc)
+    # Ensure accumulator.created_at is timezone-aware
+    created_at = accumulator.created_at
+    if created_at.tzinfo is None:
+        created_at = created_at.replace(tzinfo=timezone.utc)
+    elapsed = (now - created_at).total_seconds()
+
+    # Calculate items collected (needed for all batch types)
+    items_collected = len(accumulator.artifacts)
+    # For group batching, use _group_count if available
+    if hasattr(accumulator, "_group_count"):
+        items_collected = accumulator._group_count
+
+    result = {
+        "created_at": accumulator.created_at.isoformat(),
+        "elapsed_seconds": round(elapsed, 1),
+        "items_collected": items_collected,  # Always include for all batch types
+    }
+
+    # Size-based metrics (only if size threshold configured)
+    if batch_spec.size:
+        result["items_target"] = batch_spec.size
+        result["items_remaining"] = max(0, batch_spec.size - items_collected)
+    else:
+        # Timeout-only batches: no target
+        result["items_target"] = None
+        result["items_remaining"] = None
+
+    # Timeout-based metrics
+    if batch_spec.timeout:
+        timeout_seconds = batch_spec.timeout.total_seconds()
+        timeout_remaining = max(0, timeout_seconds - elapsed)
+
+        result["timeout_seconds"] = int(timeout_seconds)
+        result["timeout_remaining_seconds"] = round(timeout_remaining, 1)
+
+    # Determine what will trigger flush
+    if batch_spec.size and batch_spec.timeout:
+        # Hybrid: predict which will fire first based on progress percentages
+        items_collected = result["items_collected"]
+        items_target = result.get("items_target", 1)
+        timeout_remaining = result.get("timeout_remaining_seconds", 0)
+
+        # Calculate progress toward each threshold
+        size_progress = items_collected / items_target if items_target > 0 else 0
+        timeout_elapsed = elapsed
+        timeout_total = batch_spec.timeout.total_seconds()
+        time_progress = timeout_elapsed / timeout_total if timeout_total > 0 else 0
+
+        # Predict based on which threshold we're progressing toward faster
+        # If we're closer to size threshold (percentage-wise), predict size
+        # Otherwise predict timeout
+        if size_progress > time_progress:
+            result["will_flush"] = "on_size"
+        else:
+            result["will_flush"] = "on_timeout"
+    elif batch_spec.size:
+        result["will_flush"] = "on_size"
+    elif batch_spec.timeout:
+        result["will_flush"] = "on_timeout"
+
+    return result
+
+
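The hybrid "will_flush" prediction above compares progress toward the size threshold against progress toward the timeout. A small sketch with the hypothetical numbers from the docstring (18 of 25 items, 12.5 s into a 30 s timeout):

items_collected, items_target = 18, 25
elapsed, timeout_total = 12.5, 30.0

size_progress = items_collected / items_target   # 0.72
time_progress = elapsed / timeout_total          # ~0.417

will_flush = "on_size" if size_progress > time_progress else "on_timeout"
print(will_flush)  # "on_size": the batch is closer to its size threshold, percentage-wise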
+def _compute_agent_status(agent: "Agent", orchestrator: "Flock") -> str:  # noqa: F821
+    """Determine agent status based on waiting state.
+
+    Checks if agent is waiting for correlation or batch completion.
+    Used by enhanced /api/agents endpoint to show agent status.
+
+    Args:
+        agent: Agent instance
+        orchestrator: Flock orchestrator instance
+
+    Returns:
+        "ready" - Agent not waiting for anything
+        "waiting" - Agent has correlation groups or batches accumulating
+        "active" - Agent currently executing (future enhancement)
+    """
+    # Check if any subscription is waiting for correlation or batching
+    for idx, subscription in enumerate(agent.subscriptions):
+        if subscription.join:
+            pool_key = (agent.name, idx)
+            if pool_key in orchestrator._correlation_engine.correlation_groups:
+                groups = orchestrator._correlation_engine.correlation_groups[pool_key]
+                if groups:  # Has waiting correlation groups
+                    return "waiting"
+
+        if subscription.batch:
+            batch_key = (agent.name, idx)
+            if batch_key in orchestrator._batch_engine.batches:
+                accumulator = orchestrator._batch_engine.batches[batch_key]
+                if accumulator and accumulator.artifacts:
+                    return "waiting"
+
+    return "ready"
+
+
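The status rule above boils down to: any open correlation group or non-empty batch accumulator makes the agent "waiting"; otherwise it reports "ready" ("active" is noted as a future enhancement). A trivial restatement of that rule, with illustrative names that are not part of the package:

def status_for(has_open_correlation_groups: bool, has_accumulating_batch: bool) -> str:
    # Mirrors _compute_agent_status: any pending join or batch work means "waiting"
    if has_open_correlation_groups or has_accumulating_batch:
        return "waiting"
    return "ready"

assert status_for(False, False) == "ready"
assert status_for(True, False) == "waiting"
assert status_for(False, True) == "waiting"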
+def _build_logic_config(  # noqa: F821
+    agent: "Agent",  # noqa: F821
+    subscription: "Subscription",  # noqa: F821
+    idx: int,
+    orchestrator: "Flock",
+) -> dict[str, Any] | None:
+    """Build logic operations configuration for a subscription.
+
+    Phase 1.2: Extracts JoinSpec and BatchSpec configuration plus current
+    waiting state for agents using logic operations.
+
+    Args:
+        agent: Agent instance
+        subscription: Subscription to analyze
+        idx: Subscription index (for agents with multiple subscriptions)
+        orchestrator: Flock orchestrator instance
+
+    Returns:
+        Logic operations config dict or None if no join/batch:
+        {
+            "subscription_index": 0,
+            "subscription_types": ["XRayImage", "LabResults"],
+            "join": {...},   # JoinSpec config (if present)
+            "batch": {...},  # BatchSpec config (if present)
+            "waiting_state": {...}  # Current state (if waiting)
+        }
+    """
+    if not subscription.join and not subscription.batch:
+        return None
+
+    config = {
+        "subscription_index": idx,
+        "subscription_types": list(subscription.type_names),
+    }
+
+    # JoinSpec configuration
+    if subscription.join:
+        join_spec = subscription.join
+        window_type = "time" if isinstance(join_spec.within, timedelta) else "count"
+        window_value = (
+            int(join_spec.within.total_seconds())
+            if isinstance(join_spec.within, timedelta)
+            else join_spec.within
+        )
+
+        config["join"] = {
+            "correlation_strategy": "by_key",
+            "window_type": window_type,
+            "window_value": window_value,
+            "window_unit": "seconds" if window_type == "time" else "artifacts",
+            "required_types": list(subscription.type_names),
+            "type_counts": dict(subscription.type_counts),
+        }
+
+        # Get waiting state from CorrelationEngine
+        correlation_groups = _get_correlation_groups(
+            orchestrator._correlation_engine, agent.name, idx
+        )
+        if correlation_groups:
+            config["waiting_state"] = {
+                "is_waiting": True,
+                "correlation_groups": correlation_groups,
+            }
+
+    # BatchSpec configuration
+    if subscription.batch:
+        batch_spec = subscription.batch
+        strategy = (
+            "hybrid"
+            if batch_spec.size and batch_spec.timeout
+            else "size"
+            if batch_spec.size
+            else "timeout"
+        )
+
+        config["batch"] = {
+            "strategy": strategy,
+        }
+        if batch_spec.size:
+            config["batch"]["size"] = batch_spec.size
+        if batch_spec.timeout:
+            config["batch"]["timeout_seconds"] = int(batch_spec.timeout.total_seconds())
+
+        # Get waiting state from BatchEngine
+        batch_state = _get_batch_state(orchestrator._batch_engine, agent.name, idx, batch_spec)
+        if batch_state:
+            if "waiting_state" not in config:
+                config["waiting_state"] = {"is_waiting": True}
+            config["waiting_state"]["batch_state"] = batch_state
+
+    return config
+
+
 __all__ = ["DashboardHTTPService"]
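Taken together, these helpers let a dashboard client poll /api/agents and list which agents are blocked on joins or batches. A rough client-side sketch, assuming only that the service is reachable at some base URL and that the payload matches the docstrings above:

import json
from urllib.request import urlopen

BASE_URL = "http://localhost:8000"  # placeholder; use wherever the dashboard service is bound

with urlopen(f"{BASE_URL}/api/agents") as resp:
    payload = json.load(resp)

for agent in payload["agents"]:
    if agent.get("status") != "waiting":
        continue
    for op in agent.get("logic_operations", []):
        state = op.get("waiting_state", {})
        for group in state.get("correlation_groups", []):
            print(f"{agent['name']}: join waiting for {group['waiting_for']}")
        batch = state.get("batch_state")
        if batch:
            print(f"{agent['name']}: batch {batch['items_collected']}/{batch.get('items_target')}")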
flock/frontend/package.json
CHANGED
(+1 -1 change; hunk not rendered in this view)

flock/frontend/src/components/controls/PublishControl.test.tsx
CHANGED
@@ -398,7 +398,7 @@ describe('PublishControl', () => {
   });
 
   // Auto-filter checkbox tests
-  it('should render auto-set filter checkbox …
+  it('should render auto-set filter checkbox unchecked by default', async () => {
     mockFetch.mockResolvedValueOnce({
       ok: true,
       json: async () => ({ artifact_types: mockArtifactTypes }),
@@ -412,7 +412,7 @@ describe('PublishControl', () => {
 
     const checkbox = screen.getByLabelText(/set filter to correlation id/i) as HTMLInputElement;
     expect(checkbox).toBeInTheDocument();
-    expect(checkbox.checked).toBe(…
+    expect(checkbox.checked).toBe(false);
   });
 
   it('should set filter to correlation ID when checkbox is checked and publish succeeds', async () => {
@@ -439,7 +439,8 @@ describe('PublishControl', () => {
     const artifactTypeSelect = screen.getByLabelText(/artifact type/i);
     const checkbox = screen.getByLabelText(/set filter to correlation id/i) as HTMLInputElement;
 
-    // …
+    // Check the checkbox to enable auto-filter
+    fireEvent.click(checkbox);
     expect(checkbox.checked).toBe(true);
 
     fireEvent.change(artifactTypeSelect, { target: { value: 'Idea' } });
@@ -487,8 +488,7 @@ describe('PublishControl', () => {
     const artifactTypeSelect = screen.getByLabelText(/artifact type/i);
     const checkbox = screen.getByLabelText(/set filter to correlation id/i) as HTMLInputElement;
 
-    // …
-    fireEvent.click(checkbox);
+    // Checkbox should already be unchecked by default
     expect(checkbox.checked).toBe(false);
 
     // Clear any existing filter
@@ -529,15 +529,15 @@ describe('PublishControl', () => {
 
     const checkbox = screen.getByLabelText(/set filter to correlation id/i) as HTMLInputElement;
 
-    // Initially …
-    expect(checkbox.checked).toBe(true);
-
-    // Uncheck
-    fireEvent.click(checkbox);
+    // Initially unchecked
    expect(checkbox.checked).toBe(false);
 
     // Check
     fireEvent.click(checkbox);
     expect(checkbox.checked).toBe(true);
+
+    // Uncheck again
+    fireEvent.click(checkbox);
+    expect(checkbox.checked).toBe(false);
   });
 });
flock/frontend/src/components/controls/PublishControl.tsx
CHANGED

@@ -22,7 +22,7 @@ const PublishControl: React.FC = () => {
   const [errors, setErrors] = useState<ValidationErrors>({});
   const [successMessage, setSuccessMessage] = useState('');
   const [errorMessage, setErrorMessage] = useState('');
-  const [autoSetFilter, setAutoSetFilter] = useState(…
+  const [autoSetFilter, setAutoSetFilter] = useState(false); // Default: unchecked (user can opt-in to auto-filter)
 
   const setShowControls = useSettingsStore((state) => state.setShowControls);
 
flock/frontend/src/components/graph/AgentNode.tsx
CHANGED

@@ -2,6 +2,7 @@ import { memo, useState, useEffect, useRef } from 'react';
 import { NodeProps, Handle, Position } from '@xyflow/react';
 import { useUIStore } from '../../store/uiStore';
 import { useSettingsStore } from '../../store/settingsStore';
+import LogicOperationsDisplay from './LogicOperationsDisplay';
 
 // UI Optimization Migration (Phase 4.1 - Spec 002): Backend GraphNode.data is Record<string, any>
 // Agent-specific properties populated by backend snapshot
@@ -16,6 +17,7 @@ const AgentNode = memo(({ data, selected }: NodeProps) => {
   const receivedByType = nodeData.receivedByType || {};
   const sentByType = nodeData.sentByType || {};
   const streamingTokens = nodeData.streamingTokens || [];
+  const logicOperations = nodeData.logicOperations || []; // Phase 1.4: Logic operations state
 
   // Merge known types with actual counts - show all types even with 0 count
   // Start with actual counts, then add known types that haven't happened yet
@@ -307,6 +309,8 @@ const AgentNode = memo(({ data, selected }: NodeProps) => {
             </div>
           </div>
         )}
+        {/* Phase 1.4: Logic Operations Display (JoinSpec/BatchSpec waiting states) */}
+        <LogicOperationsDisplay logicOperations={logicOperations} compactNodeView={compactNodeView} />
       </div>
     )}
     {compactNodeView && (
flock/frontend/src/components/graph/GraphCanvas.tsx
CHANGED

@@ -15,6 +15,8 @@ import AgentNode from './AgentNode';
 import MessageNode from './MessageNode';
 import MessageFlowEdge from './MessageFlowEdge';
 import TransformEdge from './TransformEdge';
+import PendingJoinEdge from './PendingJoinEdge';
+import PendingBatchEdge from './PendingBatchEdge';
 import MiniMap from './MiniMap';
 import { useGraphStore } from '../../store/graphStore';
 import { useFilterStore } from '../../store/filterStore';
@@ -78,6 +80,8 @@ const GraphCanvas: React.FC = () => {
     () => ({
       message_flow: MessageFlowEdge,
       transformation: TransformEdge,
+      pending_join: PendingJoinEdge, // Phase 1.5: Pending edges for JoinSpec correlation groups
+      pending_batch: PendingBatchEdge, // Phase 1.5: Pending edges for BatchSpec accumulation
     }),
     []
   );