flock-core 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic; see the registry's advisory page for more details.

Files changed (31):
  1. flock/agent.py +16 -3
  2. flock/artifact_collector.py +158 -0
  3. flock/batch_accumulator.py +252 -0
  4. flock/correlation_engine.py +223 -0
  5. flock/dashboard/collector.py +4 -0
  6. flock/dashboard/events.py +74 -0
  7. flock/dashboard/graph_builder.py +272 -0
  8. flock/dashboard/models/graph.py +3 -1
  9. flock/dashboard/service.py +363 -14
  10. flock/frontend/package.json +1 -1
  11. flock/frontend/src/components/controls/PublishControl.test.tsx +11 -11
  12. flock/frontend/src/components/controls/PublishControl.tsx +1 -1
  13. flock/frontend/src/components/graph/AgentNode.tsx +4 -0
  14. flock/frontend/src/components/graph/GraphCanvas.tsx +4 -0
  15. flock/frontend/src/components/graph/LogicOperationsDisplay.tsx +463 -0
  16. flock/frontend/src/components/graph/PendingBatchEdge.tsx +141 -0
  17. flock/frontend/src/components/graph/PendingJoinEdge.tsx +144 -0
  18. flock/frontend/src/services/graphService.ts +3 -1
  19. flock/frontend/src/services/websocket.ts +99 -1
  20. flock/frontend/src/store/graphStore.test.ts +2 -1
  21. flock/frontend/src/store/graphStore.ts +36 -5
  22. flock/frontend/src/types/graph.ts +86 -0
  23. flock/orchestrator.py +263 -3
  24. flock/patches/__init__.py +1 -0
  25. flock/patches/dspy_streaming_patch.py +1 -0
  26. flock/subscription.py +70 -7
  27. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/METADATA +70 -14
  28. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/RECORD +31 -25
  29. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/WHEEL +0 -0
  30. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/entry_points.txt +0 -0
  31. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/licenses/LICENSE +0 -0
flock/agent.py CHANGED
@@ -988,10 +988,23 @@ class AgentBuilder:
988
988
  def _normalize_join(self, value: dict | JoinSpec | None) -> JoinSpec | None:
989
989
  if value is None or isinstance(value, JoinSpec):
990
990
  return value
991
+ # Phase 2: New JoinSpec API with 'by' and 'within' (time OR count)
992
+ from datetime import timedelta
993
+
994
+ within_value = value.get("within")
995
+ if isinstance(within_value, (int, float)):
996
+ # Count window or seconds as float - keep as is
997
+ within = (
998
+ int(within_value)
999
+ if isinstance(within_value, int)
1000
+ else timedelta(seconds=within_value)
1001
+ )
1002
+ else:
1003
+ # Default to 1 minute time window
1004
+ within = timedelta(minutes=1)
991
1005
  return JoinSpec(
992
- kind=value.get("kind", "all_of"),
993
- window=float(value.get("window", 0.0)),
994
- by=value.get("by"),
1006
+ by=value["by"], # Required
1007
+ within=within,
995
1008
  )
996
1009
 
997
1010
  def _normalize_batch(self, value: dict | BatchSpec | None) -> BatchSpec | None:
@@ -0,0 +1,158 @@
1
+ """Artifact collection and waiting pool management for AND gate logic.
2
+
3
+ This module implements the waiting pool mechanism that enables `.consumes(A, B)`
4
+ to wait for BOTH types before triggering an agent (AND gate logic).
5
+
6
+ Architecture:
7
+ - Each subscription gets a unique waiting pool identified by (agent_name, subscription_index)
8
+ - Artifacts are collected per type until all required types are present
9
+ - When complete, all collected artifacts are returned for agent execution
10
+ - After triggering, the waiting pool is cleared for the next cycle
11
+ """
12
+
13
+ from __future__ import annotations
14
+
15
+ from collections import defaultdict
16
+ from typing import TYPE_CHECKING
17
+
18
+
19
+ if TYPE_CHECKING:
20
+ from flock.agent import Agent
21
+ from flock.artifacts import Artifact
22
+ from flock.subscription import Subscription
23
+
24
+
25
class ArtifactCollector:
    """Manages waiting pools for multi-type subscriptions (AND gate logic).

    Each subscription requiring multiple types (or more than one artifact of
    a type) gets a waiting pool that collects artifacts until all required
    counts are present. Single-type, count=1 subscriptions bypass the pool
    for immediate triggering.

    Example:
        agent.consumes(TypeA, TypeB)  # Creates waiting pool for 2 types

        # TypeA published -> added to pool (not complete yet)
        # TypeB published -> added to pool (NOW complete!)
        # -> Agent triggered with [TypeA_artifact, TypeB_artifact]
        # -> Waiting pool cleared for next cycle
    """

    def __init__(self) -> None:
        """Initialize empty waiting pools."""
        # Structure: {(agent_name, subscription_index): {type_name: [artifact, ...]}}
        # Example: {("diagnostician", 0): {"XRay": [artifact1], "LabResult": [artifact2]}}
        # For count-based AND gates: {"TypeA": [a1, a2, a3]} (3 As collected)
        self._waiting_pools: dict[tuple[str, int], dict[str, list[Artifact]]] = defaultdict(
            lambda: defaultdict(list)
        )

    def add_artifact(
        self,
        agent: Agent,
        subscription: Subscription,
        artifact: Artifact,
    ) -> tuple[bool, list[Artifact]]:
        """Add artifact to waiting pool and check for completeness.

        Args:
            agent: Agent that will process the artifacts
            subscription: Subscription that matched the artifact
            artifact: Artifact to add to the waiting pool

        Returns:
            Tuple of (is_complete, artifacts):
            - is_complete: True if all required type counts are now met
            - artifacts: collected artifacts (empty list if incomplete)

        Raises:
            RuntimeError: If the subscription is not registered on the agent
                (indicates an internal orchestrator error).

        Design Notes:
            - Single-type subscriptions with count=1 bypass the pool entirely
            - Earliest artifacts win: on completion the FIRST N collected
              artifacts per type are returned; any surplus beyond the
              required count is discarded when the pool is cleared
            - After returning complete=True, the pool is cleared for the
              next cycle
        """
        # Fast path: single type, count=1 - trigger immediately, no pooling.
        if len(subscription.type_names) == 1 and subscription.type_counts[artifact.type] == 1:
            return (True, [artifact])

        # Multi-type or count-based subscription: use the waiting pool.
        # Agents can hold several subscriptions; the index disambiguates them.
        try:
            subscription_index = agent.subscriptions.index(subscription)
        except ValueError:
            # Should never happen, but defensive programming.
            raise RuntimeError(
                f"Subscription not found in agent {agent.name}. "
                "This indicates an internal orchestrator error."
            ) from None

        pool_key = (agent.name, subscription_index)
        pool = self._waiting_pools[pool_key]
        pool[artifact.type].append(artifact)

        # Complete only when every required type has reached its count.
        is_complete = all(
            len(pool[type_name]) >= required_count
            for type_name, required_count in subscription.type_counts.items()
        )
        if not is_complete:
            # Still waiting for more artifacts.
            return (False, [])

        # Complete! Gather exactly the required count per type (earliest
        # collected first) and drop the pool for the next cycle.
        artifacts: list[Artifact] = []
        for type_name, required_count in subscription.type_counts.items():
            artifacts.extend(pool[type_name][:required_count])
        del self._waiting_pools[pool_key]
        return (True, artifacts)

    def get_waiting_status(
        self, agent: Agent, subscription_index: int
    ) -> dict[str, list[Artifact]]:
        """Get current waiting pool contents for debugging/inspection.

        Args:
            agent: Agent to inspect
            subscription_index: Index of the subscription

        Returns:
            Dictionary mapping type names to lists of collected artifacts
            (empty if none)
        """
        pool_key = (agent.name, subscription_index)
        # Return a copy to prevent external mutation.
        pool = self._waiting_pools.get(pool_key, {})
        return {type_name: list(artifacts) for type_name, artifacts in pool.items()}

    def clear_waiting_pool(self, agent: Agent, subscription_index: int) -> None:
        """Manually clear a waiting pool.

        Useful for cleanup or resetting agent state.

        Args:
            agent: Agent whose pool to clear
            subscription_index: Index of the subscription
        """
        self._waiting_pools.pop((agent.name, subscription_index), None)

    def clear_all_pools(self) -> None:
        """Clear all waiting pools (orchestrator shutdown or test cleanup)."""
        self._waiting_pools.clear()

    def get_pool_count(self) -> int:
        """Get total number of active waiting pools (for metrics/debugging)."""
        return len(self._waiting_pools)


__all__ = ["ArtifactCollector"]
@@ -0,0 +1,252 @@
1
+ """
2
+ BatchAccumulator: Manages batch collection with size/timeout triggers.
3
+
4
+ Supports BatchSpec-based batching:
5
+ - Accumulates artifacts in batches per subscription
6
+ - Flushes on size threshold (e.g., batch of 25)
7
+ - Flushes on timeout (e.g., every 30 seconds)
8
+ - Whichever comes first wins
9
+ - Ensures zero data loss on shutdown
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ from datetime import datetime
15
+ from typing import TYPE_CHECKING
16
+
17
+
18
+ if TYPE_CHECKING:
19
+ from flock.artifacts import Artifact
20
+ from flock.subscription import BatchSpec, Subscription
21
+
22
+
23
class BatchAccumulator:
    """
    Accumulates artifacts for one subscription until a flush trigger fires.

    Example: for orders, accumulate 25 at a time to batch-process payments.
    A batch flushes when the size threshold is hit (checked in add_artifact)
    OR when the timeout elapses (checked via is_timeout_expired) - whichever
    comes first.
    """

    def __init__(
        self,
        *,
        batch_spec: BatchSpec,
        created_at: datetime,
    ):
        """Create an empty batch.

        Args:
            batch_spec: Size/timeout configuration for this batch.
            created_at: Timestamp when the first artifact arrived (naive
                local time; compared against datetime.now() later).
        """
        self.batch_spec = batch_spec
        self.created_at = created_at  # When first artifact arrived
        self.artifacts: list[Artifact] = []

    def add_artifact(self, artifact: Artifact) -> bool:
        """
        Add artifact to batch.

        Returns:
            True if batch should flush (size threshold reached), False otherwise
        """
        self.artifacts.append(artifact)
        if self.batch_spec.size is not None and len(self.artifacts) >= self.batch_spec.size:
            return True  # Flush now (size threshold reached)
        return False  # Not ready to flush yet

    def is_timeout_expired(self) -> bool:
        """Check if timeout has expired since batch started."""
        if self.batch_spec.timeout is None:
            return False
        return (datetime.now() - self.created_at) >= self.batch_spec.timeout

    def get_artifacts(self) -> list[Artifact]:
        """Return a shallow copy of all artifacts in the batch."""
        return self.artifacts.copy()

    def clear(self) -> None:
        """Clear the batch after flush."""
        self.artifacts.clear()


class BatchEngine:
    """
    Manages batch state for BatchSpec subscriptions.

    Responsibilities:
    1. Accumulate artifacts per (agent_name, subscription_index)
    2. Track batch size and timeout per batch
    3. Report when a batch should flush (size or timeout threshold met)
    4. Provide shutdown flush for partial batches (zero data loss)

    Example usage:
        engine = BatchEngine()

        should_flush = engine.add_artifact(
            artifact=order_artifact,
            subscription=subscription,  # Has BatchSpec
            subscription_index=0,
        )
        if should_flush:
            artifacts = engine.flush_batch("agent_name", 0)
            # Trigger agent with batch
    """

    def __init__(self):
        # One accumulator per (agent_name, subscription_index).
        self.batches: dict[tuple[str, int], BatchAccumulator] = {}
        # Group counts for add_artifact_group, keyed like self.batches.
        # Kept here (instead of a hasattr-guarded dynamic attribute on the
        # accumulator, as before) so the bookkeeping is explicit and is
        # reset together with the batch on flush.
        self._group_counts: dict[tuple[str, int], int] = {}

    def _get_accumulator(
        self, subscription: Subscription, subscription_index: int
    ) -> BatchAccumulator:
        """Return (creating on first use) the accumulator for this subscription."""
        if subscription.batch is None:
            raise ValueError("Subscription must have BatchSpec for batching")
        batch_key = (subscription.agent_name, subscription_index)
        if batch_key not in self.batches:
            self.batches[batch_key] = BatchAccumulator(
                batch_spec=subscription.batch,
                created_at=datetime.now(),
            )
        return self.batches[batch_key]

    def add_artifact(
        self,
        *,
        artifact: Artifact,
        subscription: Subscription,
        subscription_index: int,
    ) -> bool:
        """
        Add artifact to batch accumulator.

        Returns:
            True if batch should flush (size threshold reached), False otherwise

        Raises:
            ValueError: If the subscription has no BatchSpec.
        """
        accumulator = self._get_accumulator(subscription, subscription_index)
        return accumulator.add_artifact(artifact)

    def add_artifact_group(
        self,
        *,
        artifacts: list[Artifact],
        subscription: Subscription,
        subscription_index: int,
    ) -> bool:
        """
        Add a GROUP of artifacts (e.g., correlated pair) as a SINGLE batch item.

        Used for JoinSpec + BatchSpec combinations where we batch correlated
        groups, not individual artifacts. Example: JoinSpec + BatchSpec(size=2)
        means "batch 2 correlated pairs", not "batch 2 individual artifacts".

        Returns:
            True if batch should flush (group-count threshold reached),
            False otherwise

        Raises:
            ValueError: If the subscription has no BatchSpec.
        """
        accumulator = self._get_accumulator(subscription, subscription_index)
        accumulator.artifacts.extend(artifacts)

        # Size threshold counts GROUPS, not individual artifacts.
        batch_key = (subscription.agent_name, subscription_index)
        group_count = self._group_counts.get(batch_key, 0) + 1
        self._group_counts[batch_key] = group_count
        if subscription.batch.size is not None and group_count >= subscription.batch.size:
            return True  # Flush now
        return False  # Not ready to flush yet

    def flush_batch(self, agent_name: str, subscription_index: int) -> list[Artifact] | None:
        """
        Flush a batch and return its artifacts.

        Returns:
            List of artifacts in batch, or None if no (non-empty) batch exists
        """
        batch_key = (agent_name, subscription_index)
        accumulator = self.batches.get(batch_key)
        if accumulator is None or not accumulator.artifacts:
            return None
        artifacts = accumulator.get_artifacts()
        del self.batches[batch_key]
        self._group_counts.pop(batch_key, None)  # Restart group counting
        return artifacts

    def check_timeouts(self) -> list[tuple[str, int]]:
        """
        Check all batches for timeout expiry.

        Returns:
            List of (agent_name, subscription_index) tuples that should flush
        """
        return [
            batch_key
            for batch_key, accumulator in list(self.batches.items())
            if accumulator.is_timeout_expired()
        ]

    def flush_all(self) -> list[tuple[str, int, list[Artifact]]]:
        """
        Flush ALL partial batches (for shutdown). Guarantees zero data loss.

        Returns:
            List of (agent_name, subscription_index, artifacts) tuples
        """
        results = []
        for (agent_name, subscription_index), accumulator in list(self.batches.items()):
            if accumulator.artifacts:
                results.append((agent_name, subscription_index, accumulator.get_artifacts()))
        # Clear all state after flush.
        self.batches.clear()
        self._group_counts.clear()
        return results


__all__ = ["BatchAccumulator", "BatchEngine"]
@@ -0,0 +1,223 @@
1
+ """
2
+ CorrelationEngine: Manages correlated AND gates with time/count windows.
3
+
4
+ Supports JoinSpec-based correlation:
5
+ - Extracts correlation keys from artifacts
6
+ - Groups artifacts by correlation key
7
+ - Enforces time windows (timedelta) or count windows (int)
8
+ - Triggers agents when all required types arrive within window
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from collections import defaultdict
14
+ from datetime import datetime, timedelta
15
+ from typing import TYPE_CHECKING, Any
16
+
17
+
18
+ if TYPE_CHECKING:
19
+ from flock.artifacts import Artifact
20
+ from flock.subscription import JoinSpec, Subscription
21
+
22
+
23
class CorrelationGroup:
    """
    Tracks artifacts waiting for correlation within a specific key group.

    Example: For patient-123, track X-ray (TypeA) and Lab results (TypeB).
    When both arrive within the time/count window, trigger the agent.
    """

    def __init__(
        self,
        *,
        correlation_key: Any,
        required_types: set[str],
        type_counts: dict[str, int],
        window_spec: timedelta | int,
        created_at_sequence: int,
    ):
        """Create an empty group for one correlation key.

        Args:
            correlation_key: Value extracted by JoinSpec.by for this group.
            required_types: Type names that must all arrive, e.g. {"TypeA", "TypeB"}.
            type_counts: Required count per type, e.g. {"TypeA": 1, "TypeB": 1}.
            window_spec: timedelta = time window, int = count window.
            created_at_sequence: Global sequence number when the group was created.
        """
        self.correlation_key = correlation_key
        self.required_types = required_types
        self.type_counts = type_counts
        self.window_spec = window_spec
        self.created_at_sequence = created_at_sequence
        # Timestamp of the first artifact; None until one arrives.
        self.created_at_time: datetime | None = None
        # Waiting pool: type -> list of artifacts
        self.waiting_artifacts: dict[str, list[Artifact]] = defaultdict(list)

    def add_artifact(self, artifact: Artifact, current_sequence: int) -> None:
        """Add artifact to this correlation group's waiting pool."""
        if self.created_at_time is None:
            from datetime import timezone

            # First artifact starts the time window (UTC-aware).
            self.created_at_time = datetime.now(timezone.utc)
        self.waiting_artifacts[artifact.type].append(artifact)

    def is_complete(self) -> bool:
        """Check if all required types have arrived with correct counts."""
        return all(
            len(self.waiting_artifacts.get(type_name, [])) >= required_count
            for type_name, required_count in self.type_counts.items()
        )

    def is_expired(self, current_sequence: int) -> bool:
        """Check if this correlation group has expired based on its window."""
        if isinstance(self.window_spec, int):
            # Count window: expired if current sequence exceeds created + window
            return (current_sequence - self.created_at_sequence) > self.window_spec
        if isinstance(self.window_spec, timedelta):
            # Time window: expired if current time exceeds created + window.
            # Before the first artifact arrives there is nothing to expire.
            if self.created_at_time is None:
                return False
            from datetime import timezone

            elapsed = datetime.now(timezone.utc) - self.created_at_time
            return elapsed > self.window_spec
        return False

    def get_artifacts(self) -> list[Artifact]:
        """Get all artifacts in the order they should be passed to the agent.

        Iterates type_counts (an insertion-ordered dict) rather than
        required_types (a set, whose iteration order depends on string
        hashing) so the agent always receives artifacts in a stable,
        reproducible order. Takes exactly the required count per type.
        """
        result: list[Artifact] = []
        for type_name, required_count in self.type_counts.items():
            result.extend(self.waiting_artifacts[type_name][:required_count])
        return result
93
+
94
+ class CorrelationEngine:
95
+ """
96
+ Manages correlation state for JoinSpec subscriptions.
97
+
98
+ Responsibilities:
99
+ 1. Extract correlation keys from artifacts using JoinSpec.by lambda
100
+ 2. Group artifacts by correlation key
101
+ 3. Track time/count windows per correlation group
102
+ 4. Return complete correlation groups when all types arrive within window
103
+ 5. Clean up expired correlations
104
+
105
+ Example usage:
106
+ engine = CorrelationEngine()
107
+
108
+ # Add artifact to correlation tracking
109
+ completed = engine.add_artifact(
110
+ artifact=xray_artifact,
111
+ subscription=subscription, # Has JoinSpec with by + within
112
+ agent_name="diagnostician"
113
+ )
114
+
115
+ if completed:
116
+ # All types arrived! Trigger agent with correlated artifacts
117
+ artifacts = completed.get_artifacts()
118
+ """
119
+
120
+ def __init__(self):
121
+ # Global artifact sequence (for count windows)
122
+ self.global_sequence = 0
123
+
124
+ # Correlation state per (agent, subscription_index)
125
+ # Key: (agent_name, subscription_index)
126
+ # Value: dict[correlation_key, CorrelationGroup]
127
+ self.correlation_groups: dict[tuple[str, int], dict[Any, CorrelationGroup]] = defaultdict(
128
+ dict
129
+ )
130
+
131
+ def add_artifact(
132
+ self,
133
+ *,
134
+ artifact: Artifact,
135
+ subscription: Subscription,
136
+ subscription_index: int,
137
+ ) -> CorrelationGroup | None:
138
+ """
139
+ Add artifact to correlation tracking.
140
+
141
+ Returns:
142
+ CorrelationGroup if correlation is complete, None otherwise
143
+ """
144
+ # Increment global sequence (for count windows)
145
+ self.global_sequence += 1
146
+ current_sequence = self.global_sequence
147
+
148
+ # Extract correlation key using JoinSpec.by lambda
149
+ if subscription.join is None:
150
+ raise ValueError("Subscription must have JoinSpec for correlation")
151
+
152
+ join_spec: JoinSpec = subscription.join
153
+
154
+ # Parse artifact payload to extract correlation key
155
+ from flock.registry import type_registry
156
+
157
+ model_cls = type_registry.resolve(artifact.type)
158
+ payload_instance = model_cls(**artifact.payload)
159
+
160
+ try:
161
+ correlation_key = join_spec.by(payload_instance)
162
+ except Exception:
163
+ # Key extraction failed - skip this artifact
164
+ # TODO: Log warning?
165
+ return None
166
+
167
+ # Get or create correlation group for this key
168
+ pool_key = (subscription.agent_name, subscription_index)
169
+ groups = self.correlation_groups[pool_key]
170
+
171
+ if correlation_key not in groups:
172
+ # Create new correlation group
173
+ groups[correlation_key] = CorrelationGroup(
174
+ correlation_key=correlation_key,
175
+ required_types=subscription.type_names,
176
+ type_counts=subscription.type_counts,
177
+ window_spec=join_spec.within,
178
+ created_at_sequence=current_sequence,
179
+ )
180
+
181
+ group = groups[correlation_key]
182
+
183
+ # Check if group expired (for count windows, check BEFORE adding)
184
+ if group.is_expired(current_sequence):
185
+ # Group expired - remove it and start fresh
186
+ del groups[correlation_key]
187
+ # Create new group
188
+ groups[correlation_key] = CorrelationGroup(
189
+ correlation_key=correlation_key,
190
+ required_types=subscription.type_names,
191
+ type_counts=subscription.type_counts,
192
+ window_spec=join_spec.within,
193
+ created_at_sequence=current_sequence,
194
+ )
195
+ group = groups[correlation_key]
196
+
197
+ # Add artifact to group
198
+ group.add_artifact(artifact, current_sequence)
199
+
200
+ # Check if correlation is complete
201
+ if group.is_complete():
202
+ # Complete! Remove from tracking and return
203
+ completed_group = groups.pop(correlation_key)
204
+ return completed_group
205
+
206
+ # Not complete yet
207
+ return None
208
+
209
+ def cleanup_expired(self, agent_name: str, subscription_index: int) -> None:
210
+ """Clean up expired correlation groups for a specific subscription."""
211
+ pool_key = (agent_name, subscription_index)
212
+ groups = self.correlation_groups.get(pool_key, {})
213
+
214
+ # Remove expired groups
215
+ expired_keys = [
216
+ key for key, group in groups.items() if group.is_expired(self.global_sequence)
217
+ ]
218
+
219
+ for key in expired_keys:
220
+ del groups[key]
221
+
222
+
223
+ __all__ = ["CorrelationEngine", "CorrelationGroup"]
@@ -80,6 +80,9 @@ class AgentSnapshot:
80
80
  first_seen: datetime
81
81
  last_seen: datetime
82
82
  signature: str
83
+ logic_operations: list[dict] = field(
84
+ default_factory=list
85
+ ) # Phase 1.2: JoinSpec/BatchSpec config
83
86
 
84
87
 
85
88
  class DashboardEventCollector(AgentComponent):
@@ -514,6 +517,7 @@ class DashboardEventCollector(AgentComponent):
514
517
  first_seen=snapshot.first_seen,
515
518
  last_seen=snapshot.last_seen,
516
519
  signature=snapshot.signature,
520
+ logic_operations=[dict(op) for op in snapshot.logic_operations], # Phase 1.2
517
521
  )
518
522
 
519
523
  def _snapshot_to_record(self, snapshot: AgentSnapshot) -> AgentSnapshotRecord: