flock-core 0.5.2__py3-none-any.whl → 0.5.4__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as they appear in their public registry. It is provided for informational purposes only.

Potentially problematic release.
Files changed (31)
  1. flock/agent.py +16 -3
  2. flock/artifact_collector.py +158 -0
  3. flock/batch_accumulator.py +252 -0
  4. flock/correlation_engine.py +223 -0
  5. flock/dashboard/collector.py +4 -0
  6. flock/dashboard/events.py +74 -0
  7. flock/dashboard/graph_builder.py +272 -0
  8. flock/dashboard/models/graph.py +3 -1
  9. flock/dashboard/service.py +363 -14
  10. flock/frontend/package.json +1 -1
  11. flock/frontend/src/components/controls/PublishControl.test.tsx +11 -11
  12. flock/frontend/src/components/controls/PublishControl.tsx +1 -1
  13. flock/frontend/src/components/graph/AgentNode.tsx +4 -0
  14. flock/frontend/src/components/graph/GraphCanvas.tsx +4 -0
  15. flock/frontend/src/components/graph/LogicOperationsDisplay.tsx +463 -0
  16. flock/frontend/src/components/graph/PendingBatchEdge.tsx +141 -0
  17. flock/frontend/src/components/graph/PendingJoinEdge.tsx +144 -0
  18. flock/frontend/src/services/graphService.ts +3 -1
  19. flock/frontend/src/services/websocket.ts +99 -1
  20. flock/frontend/src/store/graphStore.test.ts +2 -1
  21. flock/frontend/src/store/graphStore.ts +36 -5
  22. flock/frontend/src/types/graph.ts +86 -0
  23. flock/orchestrator.py +263 -3
  24. flock/patches/__init__.py +1 -0
  25. flock/patches/dspy_streaming_patch.py +1 -0
  26. flock/subscription.py +70 -7
  27. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/METADATA +70 -14
  28. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/RECORD +31 -25
  29. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/WHEEL +0 -0
  30. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/entry_points.txt +0 -0
  31. {flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/licenses/LICENSE +0 -0
flock/orchestrator.py CHANGED
@@ -18,7 +18,10 @@ from opentelemetry.trace import Status, StatusCode
  from pydantic import BaseModel

  from flock.agent import Agent, AgentBuilder
+ from flock.artifact_collector import ArtifactCollector
  from flock.artifacts import Artifact
+ from flock.batch_accumulator import BatchEngine
+ from flock.correlation_engine import CorrelationEngine
  from flock.helper.cli_helper import init_console
  from flock.logging.auto_trace import AutoTracedMeta
  from flock.mcp import (
@@ -128,6 +131,14 @@ class Flock(metaclass=AutoTracedMeta):
  self.max_agent_iterations: int = max_agent_iterations
  self._agent_iteration_count: dict[str, int] = {}
  self.is_dashboard: bool = False
+ # AND gate logic: Artifact collection for multi-type subscriptions
+ self._artifact_collector = ArtifactCollector()
+ # JoinSpec logic: Correlation engine for correlated AND gates
+ self._correlation_engine = CorrelationEngine()
+ # BatchSpec logic: Batch accumulator for size/timeout batching
+ self._batch_engine = BatchEngine()
+ # Phase 1.2: WebSocket manager for real-time dashboard events (set by serve())
+ self._websocket_manager: Any = None
  # Unified tracing support
  self._workflow_span = None
  self._auto_workflow_enabled = os.getenv("FLOCK_AUTO_WORKFLOW_TRACE", "false").lower() in {
@@ -593,6 +604,8 @@ class Flock(metaclass=AutoTracedMeta):

  # Store collector reference for agents added later
  self._dashboard_collector = event_collector
+ # Store websocket manager for real-time event emission (Phase 1.2)
+ self._websocket_manager = websocket_manager

  # Inject event collector into all existing agents
  for agent in self._agents.values():
@@ -671,7 +684,11 @@ class Flock(metaclass=AutoTracedMeta):
  self.is_dashboard = is_dashboard
  # Only show banner in CLI mode, not dashboard mode
  if not self.is_dashboard:
- init_console(clear_screen=True, show_banner=True, model=self.model)
+ try:
+ init_console(clear_screen=True, show_banner=True, model=self.model)
+ except (UnicodeEncodeError, UnicodeDecodeError):
+ # Skip banner on Windows consoles with encoding issues (e.g., tests, CI)
+ pass
  # Handle different input types
  if isinstance(obj, Artifact):
  # Already an artifact - publish as-is
@@ -881,10 +898,103 @@ class Flock(metaclass=AutoTracedMeta):
  continue
  if self._seen_before(artifact, agent):
  continue
+
+ # JoinSpec CORRELATION: Check if subscription has correlated AND gate
+ if subscription.join is not None:
+ # Use CorrelationEngine for JoinSpec (correlated AND gates)
+ subscription_index = agent.subscriptions.index(subscription)
+ completed_group = self._correlation_engine.add_artifact(
+ artifact=artifact,
+ subscription=subscription,
+ subscription_index=subscription_index,
+ )
+
+ if completed_group is None:
+ # Still waiting for correlation to complete
+ # Phase 1.2: Emit real-time correlation update event
+ await self._emit_correlation_updated_event(
+ agent_name=agent.name,
+ subscription_index=subscription_index,
+ artifact=artifact,
+ )
+ continue
+
+ # Correlation complete! Get all correlated artifacts
+ artifacts = completed_group.get_artifacts()
+ else:
+ # AND GATE LOGIC: Use artifact collector for simple AND gates (no correlation)
+ is_complete, artifacts = self._artifact_collector.add_artifact(
+ agent, subscription, artifact
+ )
+
+ if not is_complete:
+ # Still waiting for more types (AND gate incomplete)
+ continue
+
+ # BatchSpec BATCHING: Check if subscription has batch accumulator
+ if subscription.batch is not None:
+ # Add to batch accumulator
+ subscription_index = agent.subscriptions.index(subscription)
+
+ # COMBINED FEATURES: JoinSpec + BatchSpec
+ # If we have JoinSpec, artifacts is a correlated GROUP - treat as single batch item
+ # If we have AND gate, artifacts is a complete set - treat as single batch item
+ # Otherwise (single type), add each artifact individually
+
+ if subscription.join is not None or len(subscription.type_models) > 1:
+ # JoinSpec or AND gate: Treat artifact group as ONE batch item
+ should_flush = self._batch_engine.add_artifact_group(
+ artifacts=artifacts,
+ subscription=subscription,
+ subscription_index=subscription_index,
+ )
+ else:
+ # Single type subscription: Add each artifact individually
+ should_flush = False
+ for single_artifact in artifacts:
+ should_flush = self._batch_engine.add_artifact(
+ artifact=single_artifact,
+ subscription=subscription,
+ subscription_index=subscription_index,
+ )
+
+ if should_flush:
+ # Size threshold reached! Flush batch now
+ break
+
+ if not should_flush:
+ # Batch not full yet - wait for more artifacts
+ # Phase 1.2: Emit real-time batch update event
+ await self._emit_batch_item_added_event(
+ agent_name=agent.name,
+ subscription_index=subscription_index,
+ subscription=subscription,
+ artifact=artifact,
+ )
+ continue
+
+ # Flush the batch and get all accumulated artifacts
+ batched_artifacts = self._batch_engine.flush_batch(
+ agent.name, subscription_index
+ )
+
+ if batched_artifacts is None:
+ # No batch to flush (shouldn't happen, but defensive)
+ continue
+
+ # Replace artifacts with batched artifacts
+ artifacts = batched_artifacts
+
+ # Complete! Schedule agent with all collected artifacts
  # T068: Increment iteration counter
  self._agent_iteration_count[agent.name] = iteration_count + 1
- self._mark_processed(artifact, agent)
- self._schedule_task(agent, [artifact])
+
+ # Mark all artifacts as processed (prevent duplicate triggers)
+ for collected_artifact in artifacts:
+ self._mark_processed(collected_artifact, agent)
+
+ # Schedule agent with ALL artifacts (batched, correlated, or AND gate complete)
+ self._schedule_task(agent, artifacts)

  def _schedule_task(self, agent: Agent, artifacts: list[Artifact]) -> None:
  task = asyncio.create_task(self._run_agent_task(agent, artifacts))
@@ -933,6 +1043,156 @@ class Flock(metaclass=AutoTracedMeta):
  except Exception as exc: # pragma: no cover - defensive logging
  self._logger.exception("Failed to record artifact consumption: %s", exc)

+ # Phase 1.2: Logic Operations Event Emission ----------------------------
+
+ async def _emit_correlation_updated_event(
+ self, *, agent_name: str, subscription_index: int, artifact: Artifact
+ ) -> None:
+ """Emit CorrelationGroupUpdatedEvent for real-time dashboard updates.
+
+ Called when an artifact is added to a correlation group that is not yet complete.
+
+ Args:
+ agent_name: Name of the agent with the JoinSpec subscription
+ subscription_index: Index of the subscription in the agent's subscriptions list
+ artifact: The artifact that triggered this update
+ """
+ # Only emit if dashboard is enabled
+ if self._websocket_manager is None:
+ return
+
+ # Import _get_correlation_groups helper from dashboard service
+ from flock.dashboard.service import _get_correlation_groups
+
+ # Get current correlation groups state from engine
+ groups = _get_correlation_groups(self._correlation_engine, agent_name, subscription_index)
+
+ if not groups:
+ return # No groups to report (shouldn't happen, but defensive)
+
+ # Find the group that was just updated (match by last updated time or artifact ID)
+ # For now, we'll emit an event for the FIRST group that's still waiting
+ # In practice, the artifact we just added should be in one of these groups
+ for group_state in groups:
+ if not group_state["is_complete"]:
+ # Import CorrelationGroupUpdatedEvent
+ from flock.dashboard.events import CorrelationGroupUpdatedEvent
+
+ # Build and emit event
+ event = CorrelationGroupUpdatedEvent(
+ agent_name=agent_name,
+ subscription_index=subscription_index,
+ correlation_key=group_state["correlation_key"],
+ collected_types=group_state["collected_types"],
+ required_types=group_state["required_types"],
+ waiting_for=group_state["waiting_for"],
+ elapsed_seconds=group_state["elapsed_seconds"],
+ expires_in_seconds=group_state["expires_in_seconds"],
+ expires_in_artifacts=group_state["expires_in_artifacts"],
+ artifact_id=str(artifact.id),
+ artifact_type=artifact.type,
+ is_complete=group_state["is_complete"],
+ )
+
+ # Broadcast via WebSocket
+ await self._websocket_manager.broadcast(event)
+ break # Only emit one event per artifact addition
+
+ async def _emit_batch_item_added_event(
+ self,
+ *,
+ agent_name: str,
+ subscription_index: int,
+ subscription: Subscription, # noqa: F821
+ artifact: Artifact,
+ ) -> None:
+ """Emit BatchItemAddedEvent for real-time dashboard updates.
+
+ Called when an artifact is added to a batch that hasn't reached flush threshold.
+
+ Args:
+ agent_name: Name of the agent with the BatchSpec subscription
+ subscription_index: Index of the subscription in the agent's subscriptions list
+ subscription: The subscription with BatchSpec configuration
+ artifact: The artifact that triggered this update
+ """
+ # Only emit if dashboard is enabled
+ if self._websocket_manager is None:
+ return
+
+ # Import _get_batch_state helper from dashboard service
+ from flock.dashboard.service import _get_batch_state
+
+ # Get current batch state from engine
+ batch_state = _get_batch_state(
+ self._batch_engine, agent_name, subscription_index, subscription.batch
+ )
+
+ if not batch_state:
+ return # No batch to report (shouldn't happen, but defensive)
+
+ # Import BatchItemAddedEvent
+ from flock.dashboard.events import BatchItemAddedEvent
+
+ # Build and emit event
+ event = BatchItemAddedEvent(
+ agent_name=agent_name,
+ subscription_index=subscription_index,
+ items_collected=batch_state["items_collected"],
+ items_target=batch_state.get("items_target"),
+ items_remaining=batch_state.get("items_remaining"),
+ elapsed_seconds=batch_state["elapsed_seconds"],
+ timeout_seconds=batch_state.get("timeout_seconds"),
+ timeout_remaining_seconds=batch_state.get("timeout_remaining_seconds"),
+ will_flush=batch_state["will_flush"],
+ artifact_id=str(artifact.id),
+ artifact_type=artifact.type,
+ )
+
+ # Broadcast via WebSocket
+ await self._websocket_manager.broadcast(event)
+
+ # Batch Helpers --------------------------------------------------------
+
+ async def _check_batch_timeouts(self) -> None:
+ """Check all batches for timeout expiry and flush expired batches.
+
+ This method is called periodically or manually (in tests) to enforce
+ timeout-based batching.
+ """
+ expired_batches = self._batch_engine.check_timeouts()
+
+ for agent_name, subscription_index in expired_batches:
+ # Flush the expired batch
+ artifacts = self._batch_engine.flush_batch(agent_name, subscription_index)
+
+ if artifacts is None:
+ continue
+
+ # Get the agent
+ agent = self._agents.get(agent_name)
+ if agent is None:
+ continue
+
+ # Schedule agent with batched artifacts
+ self._schedule_task(agent, artifacts)
+
+ async def _flush_all_batches(self) -> None:
+ """Flush all partial batches (for shutdown - ensures zero data loss)."""
+ all_batches = self._batch_engine.flush_all()
+
+ for agent_name, _subscription_index, artifacts in all_batches:
+ # Get the agent
+ agent = self._agents.get(agent_name)
+ if agent is None:
+ continue
+
+ # Schedule agent with partial batch
+ self._schedule_task(agent, artifacts)
+
+ # Wait for all scheduled tasks to complete
+ await self.run_until_idle()
+
  # Helpers --------------------------------------------------------------

  def _normalize_input(
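The dispatch block added to `orchestrator.py` above routes every matching artifact through up to three stages: JoinSpec correlation (or plain AND-gate collection when no `join` is set), then optional BatchSpec accumulation, then scheduling. Below is a minimal standalone sketch of that ordering only; `DemoSubscription`, `dispatch`, and the dict-based artifacts are illustrative stand-ins, not flock's actual `ArtifactCollector`/`CorrelationEngine`/`BatchEngine` APIs.

```python
# Illustrative sketch of the dispatch order: JoinSpec correlation (or plain
# AND-gate collection), then optional BatchSpec accumulation, then scheduling.
# All names here are simplified stand-ins, not flock's real classes.
from __future__ import annotations

from dataclasses import dataclass
from typing import Any, Callable


@dataclass
class DemoSubscription:
    required_types: set[str]                       # AND gate: one artifact of each type
    join_key: Callable[[dict], Any] | None = None  # stand-in for JoinSpec.by
    batch_size: int | None = None                  # stand-in for BatchSpec.size


def dispatch(sub: DemoSubscription, artifact: dict,
             correlated: dict, collected: dict, batch: list) -> list | None:
    """Return the artifacts to schedule, or None while a gate or batch is still waiting."""
    if sub.join_key is not None:
        # Correlated AND gate: group artifacts that share the same key.
        key = sub.join_key(artifact)
        group = correlated.setdefault(key, {})
        group[artifact["type"]] = artifact
        if set(group) != sub.required_types:
            return None                            # correlation incomplete
        artifacts = list(correlated.pop(key).values())
    else:
        # Plain AND gate: wait until every required type has arrived once.
        collected[artifact["type"]] = artifact
        if set(collected) != sub.required_types:
            return None                            # AND gate incomplete
        artifacts = list(collected.values())
        collected.clear()

    if sub.batch_size is not None:
        # BatchSpec: accumulate complete groups until the size threshold is hit.
        batch.append(artifacts)
        if len(batch) < sub.batch_size:
            return None                            # batch not full yet
        artifacts = [a for group in batch for a in group]
        batch.clear()

    return artifacts                               # schedule the agent with everything


# Correlated AND gate over two sensor types, batched in groups of two devices.
sub = DemoSubscription(
    required_types={"Temp", "Pressure"},
    join_key=lambda a: a["device_id"],
    batch_size=2,
)
correlated, collected, batch = {}, {}, []
events = [
    {"type": "Temp", "device_id": 1},
    {"type": "Pressure", "device_id": 1},  # completes the group for device 1
    {"type": "Temp", "device_id": 2},
    {"type": "Pressure", "device_id": 2},  # completes device 2 -> batch of 2 flushes
]
for event in events:
    ready = dispatch(sub, event, correlated, collected, batch)
    if ready is not None:
        print(f"schedule agent with {len(ready)} artifacts")  # -> 4 artifacts
```

Each call returns `None` while a gate or batch is still open, mirroring the `continue` branches in the real dispatch loop above.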
flock/patches/__init__.py CHANGED
@@ -2,4 +2,5 @@

  from flock.patches.dspy_streaming_patch import apply_patch, restore_original

+
  __all__ = ["apply_patch", "restore_original"]
flock/patches/dspy_streaming_patch.py CHANGED
@@ -10,6 +10,7 @@ This patch replaces it with a non-blocking fire-and-forget approach.
  import asyncio
  import logging

+
  logger = logging.getLogger(__name__)


flock/subscription.py CHANGED
@@ -4,6 +4,7 @@ from __future__ import annotations

  from collections.abc import Callable, Iterable, Sequence
  from dataclasses import dataclass
+ from datetime import timedelta
  from typing import TYPE_CHECKING, Any

  from pydantic import BaseModel
@@ -26,16 +27,68 @@ class TextPredicate:

  @dataclass
  class JoinSpec:
- kind: str
- window: float
- by: Callable[[Artifact], Any] | None = None
+ """
+ Specification for correlated AND gates.
+
+ Correlates artifacts by a common key within a time OR count window.
+
+ Examples:
+ # Time-based correlation (within 5 minutes)
+ JoinSpec(
+ by=lambda x: x.correlation_id,
+ within=timedelta(minutes=5)
+ )
+
+ # Count-based correlation (within next 10 artifacts)
+ JoinSpec(
+ by=lambda x: x.correlation_id,
+ within=10
+ )
+
+ Args:
+ by: Callable that extracts the correlation key from an artifact payload
+ within: Window for correlation
+ - timedelta: Time window (artifacts must arrive within this time)
+ - int: Count window (artifacts must arrive within N published artifacts)
+ """
+
+ by: Callable[[BaseModel], Any] # Extract correlation key from payload
+ within: timedelta | int # Time window OR count window for correlation


  @dataclass
  class BatchSpec:
- size: int
- within: float
- by: Callable[[Artifact], Any] | None = None
+ """
+ Specification for batch processing.
+
+ Accumulates artifacts and triggers agent when:
+ - Size threshold reached (e.g., batch of 10)
+ - Timeout expires (e.g., flush every 30 seconds)
+ - Whichever comes first
+
+ Examples:
+ # Size-based batching (flush when 25 artifacts accumulated)
+ BatchSpec(size=25)
+
+ # Timeout-based batching (flush every 30 seconds)
+ BatchSpec(timeout=timedelta(seconds=30))
+
+ # Hybrid (whichever comes first)
+ BatchSpec(size=100, timeout=timedelta(minutes=5))
+
+ Args:
+ size: Optional batch size threshold (flush when this many artifacts accumulated)
+ timeout: Optional timeout threshold (flush when this much time elapsed since first artifact)
+
+ Note: At least one of size or timeout must be specified.
+ """
+
+ size: int | None = None
+ timeout: timedelta | None = None
+
+ def __post_init__(self):
+ if self.size is None and self.timeout is None:
+ raise ValueError("BatchSpec requires at least one of: size, timeout")


  class Subscription:
@@ -60,7 +113,17 @@ class Subscription:
  raise ValueError("Subscription must declare at least one type.")
  self.agent_name = agent_name
  self.type_models: list[type[BaseModel]] = list(types)
- self.type_names: set[str] = {type_registry.register(t) for t in types}
+
+ # Register all types and build counts (supports duplicates for count-based AND gates)
+ type_name_list = [type_registry.register(t) for t in types]
+ self.type_names: set[str] = set(type_name_list) # Unique type names (for matching)
+
+ # Count-based AND gate: Track how many of each type are required
+ # Example: .consumes(A, A, B) → {"TypeA": 2, "TypeB": 1}
+ self.type_counts: dict[str, int] = {}
+ for type_name in type_name_list:
+ self.type_counts[type_name] = self.type_counts.get(type_name, 0) + 1
+
  self.where = list(where or [])
  self.text_predicates = list(text_predicates or [])
  self.from_agents = set(from_agents or [])
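The new `type_counts` bookkeeping above is what backs count-based AND gates such as `.consumes(Order, Order, Order)`: duplicate types translate into per-type required counts. Below is a minimal sketch of just that counting rule; `TypeA`, `TypeB`, and `required_counts` are illustrative placeholders, and `__name__` stands in for flock's type registry.

```python
# Sketch of the count-based AND-gate bookkeeping: duplicate types passed to
# .consumes(...) become per-type required counts (placeholder models only).
from pydantic import BaseModel


class TypeA(BaseModel):
    value: int = 0


class TypeB(BaseModel):
    value: int = 0


def required_counts(types: list[type[BaseModel]]) -> dict[str, int]:
    """Mirror Subscription.type_counts: how many artifacts of each type to wait for."""
    counts: dict[str, int] = {}
    for t in types:
        name = t.__name__          # flock resolves names via its type registry instead
        counts[name] = counts.get(name, 0) + 1
    return counts


# .consumes(TypeA, TypeA, TypeB): the agent fires once 2x TypeA and 1x TypeB have arrived
print(required_counts([TypeA, TypeA, TypeB]))  # {'TypeA': 2, 'TypeB': 1}
```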
{flock_core-0.5.2.dist-info → flock_core-0.5.4.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: flock-core
- Version: 0.5.2
+ Version: 0.5.4
  Summary: Flock: A declrative framework for building and orchestrating AI agents.
  Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
  License: MIT
@@ -266,7 +266,7 @@ flock = Flock(os.getenv("DEFAULT_MODEL", "openai/gpt-4.1"))
  bug_detector = flock.agent("bug_detector").consumes(CodeSubmission).publishes(BugAnalysis)
  security_auditor = flock.agent("security_auditor").consumes(CodeSubmission).publishes(SecurityAnalysis)

- # This agent AUTOMATICALLY waits for both analyses
+ # AND gate: This agent AUTOMATICALLY waits for BOTH analyses before triggering
  final_reviewer = flock.agent("final_reviewer").consumes(BugAnalysis, SecurityAnalysis).publishes(FinalReview)

  # 4. Run with real-time dashboard
@@ -343,29 +343,85 @@ analyzer = (
  )
  ```

- **Advanced subscriptions:**
+ **Logic Operations (AND/OR Gates):**
+
+ Flock provides intuitive syntax for coordinating multiple input types:

  ```python
- # Conditional consumption - only high-severity cases
+ # AND gate: Wait for BOTH types before triggering
+ diagnostician = flock.agent("diagnostician").consumes(XRayAnalysis, LabResults).publishes(Diagnosis)
+ # Agent triggers only when both XRayAnalysis AND LabResults are available
+
+ # OR gate: Trigger on EITHER type (via chaining)
+ alert_handler = flock.agent("alerts").consumes(SystemAlert).consumes(UserAlert).publishes(Response)
+ # Agent triggers when SystemAlert OR UserAlert is published
+
+ # Count-based AND gate: Wait for MULTIPLE instances of the same type
+ aggregator = flock.agent("aggregator").consumes(Order, Order, Order).publishes(BatchSummary)
+ # Agent triggers when THREE Order artifacts are available
+
+ # Mixed counts: Different requirements per type
+ validator = flock.agent("validator").consumes(Image, Image, Metadata).publishes(ValidationResult)
+ # Agent triggers when TWO Images AND ONE Metadata are available
+ ```
+
+ **What just happened:**
+ - ✅ **Natural syntax** - Code clearly expresses intent ("wait for 3 orders")
+ - ✅ **Order-independent** - Artifacts can arrive in any sequence
+ - ✅ **Latest wins** - If 4 As arrive but need 3, uses the 3 most recent
+ - ✅ **Zero configuration** - No manual coordination logic needed
+
+ **Advanced subscriptions unlock crazy powerful patterns:**
+
+ <p align="center">
+ <img alt="Event Join" src="docs/assets/images/join.png" width="800">
+ </p>
+
+ ```python
+ # 🎯 Predicates - Smart filtering (only process critical cases)
  urgent_care = flock.agent("urgent").consumes(
  Diagnosis,
- where=lambda d: d.severity in ["Critical", "High"]
+ where=lambda d: d.severity in ["Critical", "High"] # Conditional routing!
  )

- # Batch processing - wait for 10 items
- batch_processor = flock.agent("batch").consumes(
- Event,
- batch=BatchSpec(size=10, timeout=timedelta(seconds=30))
+ # 📦 BatchSpec - Cost optimization (process 10 at once = 90% cheaper API calls)
+ payment_processor = flock.agent("payments").consumes(
+ Transaction,
+ batch=BatchSpec(size=25, timeout=timedelta(seconds=30)) # $5 saved per batch!
  )

- # Join operations - wait for multiple types within time window
- correlator = flock.agent("correlator").consumes(
- SignalA,
- SignalB,
- join=JoinSpec(within=timedelta(minutes=5))
+ # 🔗 JoinSpec - Data correlation (match orders + shipments by ID)
+ customer_service = flock.agent("notifications").consumes(
+ Order,
+ Shipment,
+ join=JoinSpec(by=lambda x: x.order_id, within=timedelta(hours=24)) # Correlated!
+ )
+
+ # 🏭 Combined Features - Correlate sensors, THEN batch for analysis
+ quality_control = flock.agent("qc").consumes(
+ TemperatureSensor,
+ PressureSensor,
+ join=JoinSpec(by=lambda x: x.device_id, within=timedelta(seconds=30)),
+ batch=BatchSpec(size=5, timeout=timedelta(seconds=45)) # IoT at scale!
  )
  ```

+ **What just happened:**
+ - ✅ **Predicates** route work by business rules ("only critical severity")
+ - ✅ **BatchSpec** optimizes costs (25 transactions = 1 API call instead of 25)
+ - ✅ **JoinSpec** correlates related data (orders ↔ shipments, sensors ↔ readings)
+ - ✅ **Combined** delivers production-grade multi-stage pipelines
+
+ **Real-world impact:**
+ - 💰 E-commerce: Save $5 per batch on payment processing fees
+ - 🏥 Healthcare: Correlate patient scans + lab results for diagnosis
+ - 🏭 Manufacturing: Monitor 1000+ IoT sensors with efficient batching
+ - 📊 Finance: Match trades + confirmations within 5-minute windows
+
+ <p align="center">
+ <img alt="Event Batch" src="docs/assets/images/batch.png" width="800">
+ </p>
+

  ### Visibility Controls (The Security)
  **Unlike other frameworks, Flock has zero-trust security built-in:**