flock-core 0.5.8__py3-none-any.whl → 0.5.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic.

Files changed (52)
  1. flock/agent.py +149 -62
  2. flock/api/themes.py +6 -2
  3. flock/artifact_collector.py +6 -3
  4. flock/batch_accumulator.py +3 -1
  5. flock/cli.py +3 -1
  6. flock/components.py +45 -56
  7. flock/context_provider.py +531 -0
  8. flock/correlation_engine.py +8 -4
  9. flock/dashboard/collector.py +48 -29
  10. flock/dashboard/events.py +10 -4
  11. flock/dashboard/launcher.py +3 -1
  12. flock/dashboard/models/graph.py +9 -3
  13. flock/dashboard/service.py +143 -72
  14. flock/dashboard/websocket.py +17 -4
  15. flock/engines/dspy_engine.py +174 -98
  16. flock/engines/examples/simple_batch_engine.py +9 -3
  17. flock/examples.py +6 -2
  18. flock/frontend/src/services/indexeddb.test.ts +4 -4
  19. flock/frontend/src/services/indexeddb.ts +1 -1
  20. flock/helper/cli_helper.py +14 -1
  21. flock/logging/auto_trace.py +6 -1
  22. flock/logging/formatters/enum_builder.py +3 -1
  23. flock/logging/formatters/theme_builder.py +32 -17
  24. flock/logging/formatters/themed_formatter.py +38 -22
  25. flock/logging/logging.py +21 -7
  26. flock/logging/telemetry.py +9 -3
  27. flock/logging/telemetry_exporter/duckdb_exporter.py +27 -25
  28. flock/logging/trace_and_logged.py +14 -5
  29. flock/mcp/__init__.py +3 -6
  30. flock/mcp/client.py +49 -19
  31. flock/mcp/config.py +12 -6
  32. flock/mcp/manager.py +6 -2
  33. flock/mcp/servers/sse/flock_sse_server.py +9 -3
  34. flock/mcp/servers/streamable_http/flock_streamable_http_server.py +6 -2
  35. flock/mcp/tool.py +18 -6
  36. flock/mcp/types/handlers.py +3 -1
  37. flock/mcp/types/types.py +9 -3
  38. flock/orchestrator.py +204 -50
  39. flock/orchestrator_component.py +15 -5
  40. flock/patches/dspy_streaming_patch.py +12 -4
  41. flock/registry.py +9 -3
  42. flock/runtime.py +69 -18
  43. flock/service.py +19 -6
  44. flock/store.py +29 -10
  45. flock/subscription.py +6 -4
  46. flock/utilities.py +41 -13
  47. flock/utility/output_utility_component.py +31 -11
  48. {flock_core-0.5.8.dist-info → flock_core-0.5.10.dist-info}/METADATA +134 -4
  49. {flock_core-0.5.8.dist-info → flock_core-0.5.10.dist-info}/RECORD +52 -51
  50. {flock_core-0.5.8.dist-info → flock_core-0.5.10.dist-info}/WHEEL +0 -0
  51. {flock_core-0.5.8.dist-info → flock_core-0.5.10.dist-info}/entry_points.txt +0 -0
  52. {flock_core-0.5.8.dist-info → flock_core-0.5.10.dist-info}/licenses/LICENSE +0 -0
flock/orchestrator.py CHANGED
@@ -95,6 +95,7 @@ class Flock(metaclass=AutoTracedMeta):
         *,
         store: BlackboardStore | None = None,
         max_agent_iterations: int = 1000,
+        context_provider: Any = None,
     ) -> None:
         """Initialize the Flock orchestrator for blackboard-based agent coordination.

@@ -104,32 +105,43 @@ class Flock(metaclass=AutoTracedMeta):
             store: Custom blackboard storage backend. Defaults to InMemoryBlackboardStore.
             max_agent_iterations: Circuit breaker limit to prevent runaway agent loops.
                 Defaults to 1000 iterations per agent before reset.
+            context_provider: Global context provider for all agents (Phase 3 security fix).
+                If None, agents use DefaultContextProvider. Can be overridden per-agent.

         Examples:
             >>> # Basic initialization with default model
             >>> flock = Flock("openai/gpt-4.1")

             >>> # Custom storage backend
-            >>> flock = Flock(
-            ...     "openai/gpt-4o",
-            ...     store=CustomBlackboardStore()
-            ... )
+            >>> flock = Flock("openai/gpt-4o", store=CustomBlackboardStore())

             >>> # Circuit breaker configuration
+            >>> flock = Flock("openai/gpt-4.1", max_agent_iterations=500)
+
+            >>> # Global context provider (Phase 3 security fix)
+            >>> from flock.context_provider import DefaultContextProvider
             >>> flock = Flock(
-            ...     "openai/gpt-4.1",
-            ...     max_agent_iterations=500
+            ...     "openai/gpt-4.1", context_provider=DefaultContextProvider()
             ... )
         """
         self._patch_litellm_proxy_imports()
         self._logger = logging.getLogger(__name__)
         self.model = model
+
+        try:
+            init_console(clear_screen=True, show_banner=True, model=self.model)
+        except (UnicodeEncodeError, UnicodeDecodeError):
+            # Skip banner on Windows consoles with encoding issues (e.g., tests, CI)
+            pass
+
         self.store: BlackboardStore = store or InMemoryBlackboardStore()
         self._agents: dict[str, Agent] = {}
         self._tasks: set[Task[Any]] = set()
         self._processed: set[tuple[str, str]] = set()
         self._lock = asyncio.Lock()
         self.metrics: dict[str, float] = {"artifacts_published": 0, "agent_runs": 0}
+        # Phase 3: Global context provider (security fix)
+        self._default_context_provider = context_provider
         # MCP integration
         self._mcp_configs: dict[str, FlockMCPConfiguration] = {}
         self._mcp_manager: FlockMCPClientManager | None = None
@@ -153,7 +165,9 @@ class Flock(metaclass=AutoTracedMeta):
         self._websocket_manager: Any = None
         # Unified tracing support
         self._workflow_span = None
-        self._auto_workflow_enabled = os.getenv("FLOCK_AUTO_WORKFLOW_TRACE", "false").lower() in {
+        self._auto_workflow_enabled = os.getenv(
+            "FLOCK_AUTO_WORKFLOW_TRACE", "false"
+        ).lower() in {
             "true",
             "1",
             "yes",
@@ -357,7 +371,11 @@ class Flock(metaclass=AutoTracedMeta):
             path_str = str(abs_path)

             # Extract a meaningful name (last component of path)
-            name = PathLib(path_str).name or path_str.rstrip("/").split("/")[-1] or "root"
+            name = (
+                PathLib(path_str).name
+                or path_str.rstrip("/").split("/")[-1]
+                or "root"
+            )
             mcp_roots.append(MCPRoot(uri=uri, name=name))

         # Build configuration
@@ -559,12 +577,17 @@ class Flock(metaclass=AutoTracedMeta):
         if pending_batches and (
             self._batch_timeout_task is None or self._batch_timeout_task.done()
         ):
-            self._batch_timeout_task = asyncio.create_task(self._batch_timeout_checker_loop())
+            self._batch_timeout_task = asyncio.create_task(
+                self._batch_timeout_checker_loop()
+            )

         if pending_correlations and (
-            self._correlation_cleanup_task is None or self._correlation_cleanup_task.done()
+            self._correlation_cleanup_task is None
+            or self._correlation_cleanup_task.done()
         ):
-            self._correlation_cleanup_task = asyncio.create_task(self._correlation_cleanup_loop())
+            self._correlation_cleanup_task = asyncio.create_task(
+                self._correlation_cleanup_loop()
+            )

         # If deferred work is still outstanding, consider the orchestrator quiescent for
         # now but leave watchdog tasks running to finish the job.
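The hunk above only rewraps long lines, but the underlying pattern is worth noting: the orchestrator lazily (re)starts its watchdog loops only when deferred work exists and no live task is running. A minimal, self-contained sketch of that pattern follows; `Watchdog`, `_loop`, and `ensure_running` are illustrative names, not flock-core API.

```python
import asyncio


class Watchdog:
    """Lazily started background loop, restarted only if the old task finished."""

    def __init__(self) -> None:
        self._task: asyncio.Task | None = None

    async def _loop(self) -> None:
        while True:
            await asyncio.sleep(0.1)  # poll deferred work here

    def ensure_running(self, has_pending_work: bool) -> None:
        # Must be called from code running inside an event loop.
        if has_pending_work and (self._task is None or self._task.done()):
            self._task = asyncio.create_task(self._loop())
```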
@@ -585,15 +608,60 @@ class Flock(metaclass=AutoTracedMeta):
     async def direct_invoke(
         self, agent: Agent, inputs: Sequence[BaseModel | Mapping[str, Any] | Artifact]
     ) -> list[Artifact]:
-        artifacts = [self._normalize_input(value, produced_by="__direct__") for value in inputs]
+        artifacts = [
+            self._normalize_input(value, produced_by="__direct__") for value in inputs
+        ]
         for artifact in artifacts:
             self._mark_processed(artifact, agent)
             await self._persist_and_schedule(artifact)
-        ctx = Context(board=BoardHandle(self), orchestrator=self, task_id=str(uuid4()))
+
+        # Phase 8: Evaluate context BEFORE creating Context (security fix)
+        # Provider resolution: per-agent > global > DefaultContextProvider
+        from flock.context_provider import (
+            BoundContextProvider,
+            ContextRequest,
+            DefaultContextProvider,
+        )
+
+        inner_provider = (
+            getattr(agent, "context_provider", None)
+            or self._default_context_provider
+            or DefaultContextProvider()
+        )
+
+        # SECURITY FIX: Wrap provider with BoundContextProvider to prevent identity spoofing
+        provider = BoundContextProvider(inner_provider, agent.identity)
+
+        # Evaluate context using provider (orchestrator controls this!)
+        # Engines will receive pre-filtered artifacts via ctx.artifacts
+        correlation_id = (
+            artifacts[0].correlation_id
+            if artifacts and artifacts[0].correlation_id
+            else uuid4()
+        )
+        request = ContextRequest(
+            agent=agent,
+            correlation_id=correlation_id,
+            store=self.store,
+            agent_identity=agent.identity,
+            exclude_ids={a.id for a in artifacts},  # Exclude input artifacts
+        )
+        context_artifacts = await provider(request)
+
+        # Phase 8: Create Context with pre-filtered data (no capabilities!)
+        # SECURITY: Context is now just data - engines can't query anything
+        ctx = Context(
+            artifacts=context_artifacts,  # Pre-filtered conversation context
+            agent_identity=agent.identity,
+            task_id=str(uuid4()),
+            correlation_id=correlation_id,
+        )
         self._record_agent_run(agent)
         return await agent.execute(ctx, artifacts)

-    async def arun(self, agent_builder: AgentBuilder, *inputs: BaseModel) -> list[Artifact]:
+    async def arun(
+        self, agent_builder: AgentBuilder, *inputs: BaseModel
+    ) -> list[Artifact]:
         """Execute an agent with inputs and wait for all cascades to complete (async).

         Convenience method that combines direct agent invocation with run_until_idle().
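For readers tracking the Phase 8 change in the hunk above: the orchestrator now resolves a context provider (per-agent, then the global one passed to `Flock()`, then `DefaultContextProvider`), wraps it in `BoundContextProvider`, and awaits it with a `ContextRequest` to obtain the artifacts an engine is allowed to see. Based only on that call pattern, a custom provider appears to be an async callable that takes a `ContextRequest` and returns a list of artifacts. The sketch below is a hypothetical provider built on that assumption (including the assumption that the default provider can be awaited directly and returns the list oldest-to-newest); `RecentOnlyContextProvider` is not part of flock-core.

```python
from flock.context_provider import ContextRequest, DefaultContextProvider


class RecentOnlyContextProvider:
    """Illustrative provider: keep only the N most recent context artifacts."""

    def __init__(self, max_artifacts: int = 10) -> None:
        self.max_artifacts = max_artifacts
        self._inner = DefaultContextProvider()

    async def __call__(self, request: ContextRequest):
        artifacts = await self._inner(request)   # delegate the actual filtering
        return artifacts[-self.max_artifacts:]   # then truncate to the most recent
```

Under the resolution order shown in the diff, such a provider could presumably be passed either globally via `Flock(..., context_provider=RecentOnlyContextProvider())` or attached to a single agent, with the per-agent setting taking precedence.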
@@ -614,9 +682,7 @@ class Flock(metaclass=AutoTracedMeta):

             >>> # Multiple inputs
             >>> results = await flock.arun(
-            ...     task_agent,
-            ...     Task(name="deploy"),
-            ...     Task(name="test")
+            ...     task_agent, Task(name="deploy"), Task(name="test")
             ... )

         Note:
@@ -735,6 +801,15 @@ class Flock(metaclass=AutoTracedMeta):
         # Store websocket manager for real-time event emission (Phase 1.2)
         self._websocket_manager = websocket_manager

+        # Phase 6+7: Set class-level WebSocket broadcast wrapper (dashboard mode)
+        async def _broadcast_wrapper(event):
+            """Isolated broadcast wrapper - no reference chain to orchestrator."""
+            return await websocket_manager.broadcast(event)
+
+        from flock.agent import Agent
+
+        Agent._websocket_broadcast_global = _broadcast_wrapper
+
         # Inject event collector into all existing agents
         for agent in self._agents.values():
             # Add dashboard collector with priority ordering handled by agent
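The `_broadcast_wrapper` above closes over only the websocket manager and is installed as a class attribute on `Agent`, so agents can emit dashboard events without holding a reference back to the orchestrator. A self-contained illustration of that pattern follows; `FakeWebSocketManager`, `Worker`, and `_broadcast_global` are placeholder names, not flock-core API.

```python
import asyncio


class FakeWebSocketManager:
    async def broadcast(self, event: dict) -> None:
        print("broadcast:", event)


class Worker:
    # Class-level hook; None means "no dashboard attached".
    _broadcast_global = None

    async def emit(self, event: dict) -> None:
        if type(self)._broadcast_global is not None:
            await type(self)._broadcast_global(event)


async def main() -> None:
    manager = FakeWebSocketManager()

    async def _broadcast_wrapper(event: dict) -> None:
        # Only the manager is captured - no back-reference to the caller.
        await manager.broadcast(event)

    Worker._broadcast_global = _broadcast_wrapper
    await Worker().emit({"event": "agent_scheduled"})


asyncio.run(main())
```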
@@ -802,21 +877,12 @@ class Flock(metaclass=AutoTracedMeta):

             >>> # Publish with custom visibility
             >>> await orchestrator.publish(
-            ...     task,
-            ...     visibility=PrivateVisibility(agents={"admin"})
+            ...     task, visibility=PrivateVisibility(agents={"admin"})
             ... )

             >>> # Publish with tags for channel routing
             >>> await orchestrator.publish(task, tags={"urgent", "backend"})
         """
-        self.is_dashboard = is_dashboard
-        # Only show banner in CLI mode, not dashboard mode
-        if not self.is_dashboard:
-            try:
-                init_console(clear_screen=True, show_banner=True, model=self.model)
-            except (UnicodeEncodeError, UnicodeDecodeError):
-                # Skip banner on Windows consoles with encoding issues (e.g., tests, CI)
-                pass
         # Handle different input types
         if isinstance(obj, Artifact):
             # Already an artifact - publish as-is
@@ -925,16 +991,12 @@ class Flock(metaclass=AutoTracedMeta):
         Examples:
             >>> # Testing: Execute agent without triggering others
             >>> results = await orchestrator.invoke(
-            ...     agent,
-            ...     Task(name="test", priority=5),
-            ...     publish_outputs=False
+            ...     agent, Task(name="test", priority=5), publish_outputs=False
             ... )

             >>> # HTTP endpoint: Execute specific agent, allow cascade
             >>> results = await orchestrator.invoke(
-            ...     movie_agent,
-            ...     Idea(topic="AI", genre="comedy"),
-            ...     publish_outputs=True
+            ...     movie_agent, Idea(topic="AI", genre="comedy"), publish_outputs=True
             ... )
             >>> await orchestrator.run_until_idle()
         """
@@ -953,8 +1015,42 @@ class Flock(metaclass=AutoTracedMeta):
             visibility=PublicVisibility(),
         )

-        # Execute agent directly
-        ctx = Context(board=BoardHandle(self), orchestrator=self, task_id=str(uuid4()))
+        # Phase 8: Evaluate context BEFORE creating Context (security fix)
+        # Provider resolution: per-agent > global > DefaultContextProvider
+        from flock.context_provider import (
+            BoundContextProvider,
+            ContextRequest,
+            DefaultContextProvider,
+        )
+
+        inner_provider = (
+            getattr(agent_obj, "context_provider", None)
+            or self._default_context_provider
+            or DefaultContextProvider()
+        )
+
+        # SECURITY FIX: Wrap provider with BoundContextProvider to prevent identity spoofing
+        provider = BoundContextProvider(inner_provider, agent_obj.identity)
+
+        # Evaluate context using provider (orchestrator controls this!)
+        correlation_id = artifact.correlation_id if artifact.correlation_id else uuid4()
+        request = ContextRequest(
+            agent=agent_obj,
+            correlation_id=correlation_id,
+            store=self.store,
+            agent_identity=agent_obj.identity,
+            exclude_ids={artifact.id},  # Exclude input artifact
+        )
+        context_artifacts = await provider(request)
+
+        # Phase 8: Create Context with pre-filtered data (no capabilities!)
+        # SECURITY: Context is now just data - engines can't query anything
+        ctx = Context(
+            artifacts=context_artifacts,  # Pre-filtered conversation context
+            agent_identity=agent_obj.identity,
+            task_id=str(uuid4()),
+            correlation_id=correlation_id,
+        )
         self._record_agent_run(agent_obj)

         # Execute with optional timeout
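Both `invoke()` and `direct_invoke()` wrap the resolved provider in `BoundContextProvider` before calling it, which is how the "identity spoofing" comments in the diff are enforced: whatever identity a caller puts on the request, the wrapper pins the agent's own identity. The following is an illustrative, self-contained sketch of that wrapping idea using a toy request type; it mirrors the intent described in the comments, not the real `BoundContextProvider` implementation.

```python
import asyncio
from dataclasses import dataclass, field, replace
from typing import Any


@dataclass
class ToyRequest:                      # stand-in for ContextRequest
    agent: Any
    agent_identity: str
    exclude_ids: set = field(default_factory=set)


class BoundProviderSketch:
    """Pins an identity onto every request before delegating (illustrative)."""

    def __init__(self, inner, identity: str) -> None:
        self._inner = inner
        self._identity = identity

    async def __call__(self, request: ToyRequest):
        pinned = replace(request, agent_identity=self._identity)  # ignore caller's value
        return await self._inner(pinned)


async def _demo() -> None:
    async def echo_provider(request: ToyRequest):
        return [f"context for {request.agent_identity}"]

    provider = BoundProviderSketch(echo_provider, identity="report_agent")
    # Even a spoofed identity on the request is overwritten by the wrapper.
    print(await provider(ToyRequest(agent=None, agent_identity="admin_agent")))


asyncio.run(_demo())
```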
@@ -964,7 +1060,8 @@ class Flock(metaclass=AutoTracedMeta):
         else:
             outputs = await agent_obj.execute(ctx, [artifact])

-        # Optionally publish outputs to blackboard
+        # Phase 6: Orchestrator publishes outputs (security fix)
+        # Agents return artifacts, orchestrator validates and publishes
         if publish_outputs:
             for output in outputs:
                 await self._persist_and_schedule(output)
@@ -987,7 +1084,9 @@ class Flock(metaclass=AutoTracedMeta):
         if self._components_initialized:
             return

-        self._logger.info(f"Initializing {len(self._components)} orchestrator components")
+        self._logger.info(
+            f"Initializing {len(self._components)} orchestrator components"
+        )

         for component in self._components:
             comp_name = component.name or component.__class__.__name__
@@ -1061,7 +1160,9 @@ class Flock(metaclass=AutoTracedMeta):
             )

             try:
-                decision = await component.on_before_schedule(self, artifact, agent, subscription)
+                decision = await component.on_before_schedule(
+                    self, artifact, agent, subscription
+                )

                 if decision == ScheduleDecision.SKIP:
                     self._logger.info(
@@ -1105,7 +1206,9 @@ class Flock(metaclass=AutoTracedMeta):
             )

             try:
-                result = await component.on_collect_artifacts(self, artifact, agent, subscription)
+                result = await component.on_collect_artifacts(
+                    self, artifact, agent, subscription
+                )

                 if result is not None:
                     self._logger.debug(
@@ -1147,7 +1250,9 @@ class Flock(metaclass=AutoTracedMeta):
             )

             try:
-                result = await component.on_before_agent_schedule(self, agent, current_artifacts)
+                result = await component.on_before_agent_schedule(
+                    self, agent, current_artifacts
+                )

                 if result is None:
                     self._logger.info(
@@ -1218,7 +1323,9 @@ class Flock(metaclass=AutoTracedMeta):
         Components execute in priority order. Exceptions are logged but don't
         prevent shutdown of other components (best-effort cleanup).
         """
-        self._logger.info(f"Shutting down {len(self._components)} orchestrator components")
+        self._logger.info(
+            f"Shutting down {len(self._components)} orchestrator components"
+        )

         for component in self._components:
             comp_name = component.name or component.__class__.__name__
@@ -1271,14 +1378,18 @@ class Flock(metaclass=AutoTracedMeta):
                 # Phase 3: Component hook - before schedule (circuit breaker, deduplication, etc.)
                 from flock.orchestrator_component import ScheduleDecision

-                decision = await self._run_before_schedule(artifact, agent, subscription)
+                decision = await self._run_before_schedule(
+                    artifact, agent, subscription
+                )
                 if decision == ScheduleDecision.SKIP:
                     continue  # Skip this subscription
                 if decision == ScheduleDecision.DEFER:
                     continue  # Defer for later (batching/correlation)

                 # Phase 3: Component hook - collect artifacts (handles AND gates, correlation, batching)
-                collection = await self._run_collect_artifacts(artifact, agent, subscription)
+                collection = await self._run_collect_artifacts(
+                    artifact, agent, subscription
+                )
                 if not collection.complete:
                     continue  # Still collecting (AND gate, correlation, or batch incomplete)

@@ -1292,7 +1403,9 @@ class Flock(metaclass=AutoTracedMeta):
                 # Complete! Schedule agent with collected artifacts
                 # Schedule agent task
                 is_batch_execution = subscription.batch is not None
-                task = self._schedule_task(agent, artifacts, is_batch=is_batch_execution)
+                task = self._schedule_task(
+                    agent, artifacts, is_batch=is_batch_execution
+                )

                 # Phase 3: Component hook - agent scheduled (notification)
                 await self._run_agent_scheduled(agent, artifacts, task)
@@ -1301,7 +1414,9 @@ class Flock(metaclass=AutoTracedMeta):
         self, agent: Agent, artifacts: list[Artifact], is_batch: bool = False
     ) -> Task[Any]:
         """Schedule agent task and return the task handle."""
-        task = asyncio.create_task(self._run_agent_task(agent, artifacts, is_batch=is_batch))
+        task = asyncio.create_task(
+            self._run_agent_task(agent, artifacts, is_batch=is_batch)
+        )
         self._tasks.add(task)
         task.add_done_callback(self._tasks.discard)
         return task
@@ -1322,15 +1437,52 @@ class Flock(metaclass=AutoTracedMeta):
     ) -> None:
         correlation_id = artifacts[0].correlation_id if artifacts else uuid4()

+        # Phase 8: Evaluate context BEFORE creating Context (security fix)
+        # Provider resolution: per-agent > global > DefaultContextProvider
+        from flock.context_provider import (
+            BoundContextProvider,
+            ContextRequest,
+            DefaultContextProvider,
+        )
+
+        inner_provider = (
+            getattr(agent, "context_provider", None)
+            or self._default_context_provider
+            or DefaultContextProvider()
+        )
+
+        # SECURITY FIX: Wrap provider with BoundContextProvider to prevent identity spoofing
+        provider = BoundContextProvider(inner_provider, agent.identity)
+
+        # Evaluate context using provider (orchestrator controls this!)
+        # Engines will receive pre-filtered artifacts via ctx.artifacts
+        request = ContextRequest(
+            agent=agent,
+            correlation_id=correlation_id,
+            store=self.store,
+            agent_identity=agent.identity,
+            exclude_ids={a.id for a in artifacts},  # Exclude input artifacts
+        )
+        context_artifacts = await provider(request)
+
+        # Phase 8: Create Context with pre-filtered data (no capabilities!)
+        # SECURITY: Context is now just data - engines can't query anything
         ctx = Context(
-            board=BoardHandle(self),
-            orchestrator=self,
+            artifacts=context_artifacts,  # Pre-filtered conversation context
+            agent_identity=agent.identity,
             task_id=str(uuid4()),
             correlation_id=correlation_id,
-            is_batch=is_batch,  # NEW!
+            is_batch=is_batch,
         )
         self._record_agent_run(agent)
-        await agent.execute(ctx, artifacts)
+
+        # Phase 6: Execute agent (returns artifacts, doesn't publish)
+        outputs = await agent.execute(ctx, artifacts)
+
+        # Phase 6: Orchestrator publishes outputs (security fix)
+        # This fixes Vulnerability #2 (WRITE Bypass) - agents can't bypass validation
+        for output in outputs:
+            await self._persist_and_schedule(output)

         if artifacts:
             try:
@@ -1373,7 +1525,9 @@ class Flock(metaclass=AutoTracedMeta):
         from flock.dashboard.service import _get_correlation_groups

         # Get current correlation groups state from engine
-        groups = _get_correlation_groups(self._correlation_engine, agent_name, subscription_index)
+        groups = _get_correlation_groups(
+            self._correlation_engine, agent_name, subscription_index
+        )

         if not groups:
             return  # No groups to report (shouldn't happen, but defensive)
flock/orchestrator_component.py CHANGED
@@ -150,7 +150,9 @@ class OrchestratorComponent(BaseModel, metaclass=TracedModelMeta):
         >>> # Simple component
         >>> class LoggingComponent(OrchestratorComponent):
         ...     async def on_agent_scheduled(self, orch, agent, artifacts, task):
-        ...         print(f"Agent {agent.name} scheduled with {len(artifacts)} artifacts")
+        ...         print(
+        ...             f"Agent {agent.name} scheduled with {len(artifacts)} artifacts"
+        ...         )

         >>> # Circuit breaker component
         >>> class CircuitBreakerComponent(OrchestratorComponent):
@@ -166,7 +168,9 @@ class OrchestratorComponent(BaseModel, metaclass=TracedModelMeta):
     """

     name: str | None = None
-    config: OrchestratorComponentConfig = Field(default_factory=OrchestratorComponentConfig)
+    config: OrchestratorComponentConfig = Field(
+        default_factory=OrchestratorComponentConfig
+    )
     priority: int = 0  # Lower priority = earlier execution

     # ──────────────────────────────────────────────────────────
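To make the hook mechanics concrete, here is a hedged sketch of a notification-only component, modelled directly on the `LoggingComponent` doctest above and on the `name`/`priority` fields shown in this hunk; `ArtifactCountLogger` and its priority value are illustrative choices, not part of flock-core.

```python
from flock.orchestrator_component import OrchestratorComponent


class ArtifactCountLogger(OrchestratorComponent):
    """Logs how many artifacts each agent was scheduled with."""

    name: str | None = "artifact-count-logger"
    priority: int = 10  # lower values run earlier, per the field comment above

    async def on_agent_scheduled(self, orch, agent, artifacts, task):
        # Notification-only hook: observe scheduling, don't change it.
        print(f"Agent {agent.name} scheduled with {len(artifacts)} artifacts")
```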
@@ -355,7 +359,7 @@ class OrchestratorComponent(BaseModel, metaclass=TracedModelMeta):
         ...         await self.ws.broadcast({
         ...             "event": "agent_scheduled",
         ...             "agent": agent.name,
-        ...             "count": len(artifacts)
+        ...             "count": len(artifacts),
         ...         })
         """

@@ -484,7 +488,10 @@ class BuiltinCollectionComponent(OrchestratorComponent):
                 subscription_index=subscription_index,
             )

-            if subscription.batch.timeout and orchestrator._batch_timeout_task is None:
+            if (
+                subscription.batch.timeout
+                and orchestrator._batch_timeout_task is None
+            ):
                 import asyncio

                 orchestrator._batch_timeout_task = asyncio.create_task(
@@ -500,7 +507,10 @@ class BuiltinCollectionComponent(OrchestratorComponent):
                 subscription_index=subscription_index,
             )

-            if subscription.batch.timeout and orchestrator._batch_timeout_task is None:
+            if (
+                subscription.batch.timeout
+                and orchestrator._batch_timeout_task is None
+            ):
                 import asyncio

                 orchestrator._batch_timeout_task = asyncio.create_task(
flock/patches/dspy_streaming_patch.py CHANGED
@@ -45,7 +45,9 @@ def patched_sync_send_to_stream(stream, message):
     try:
         asyncio.run(_send())
     except Exception as e:
-        logger.debug(f"DSPy status message send failed in sync context (non-critical): {e}")
+        logger.debug(
+            f"DSPy status message send failed in sync context (non-critical): {e}"
+        )


 def apply_patch():
@@ -55,12 +57,16 @@ def apply_patch():

         # Store original for reference (in case we need to restore)
         if not hasattr(dspy_messages, "_original_sync_send_to_stream"):
-            dspy_messages._original_sync_send_to_stream = dspy_messages.sync_send_to_stream
+            dspy_messages._original_sync_send_to_stream = (
+                dspy_messages.sync_send_to_stream
+            )

         # Replace with our non-blocking version
         dspy_messages.sync_send_to_stream = patched_sync_send_to_stream

-        logger.info("Applied DSPy streaming patch - status messages are now non-blocking")
+        logger.info(
+            "Applied DSPy streaming patch - status messages are now non-blocking"
+        )
         return True

     except Exception as e:
@@ -74,7 +80,9 @@ def restore_original():
         import dspy.streaming.messages as dspy_messages

         if hasattr(dspy_messages, "_original_sync_send_to_stream"):
-            dspy_messages.sync_send_to_stream = dspy_messages._original_sync_send_to_stream
+            dspy_messages.sync_send_to_stream = (
+                dspy_messages._original_sync_send_to_stream
+            )
         logger.info("Restored original DSPy streaming function")
         return True

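The `apply_patch`/`restore_original` pair above follows a standard monkey-patch discipline: save the original attribute exactly once, swap in the replacement, and restore from the saved copy on demand. A self-contained, generic sketch of that discipline follows; `some_module` and `handler` are placeholders, not dspy or flock attributes.

```python
import types

# Placeholder module with a single attribute we want to patch.
some_module = types.SimpleNamespace(handler=lambda x: x)


def apply_patch(replacement):
    # Save the original only the first time, so repeated patching is idempotent.
    if not hasattr(some_module, "_original_handler"):
        some_module._original_handler = some_module.handler
    some_module.handler = replacement


def restore_original():
    if hasattr(some_module, "_original_handler"):
        some_module.handler = some_module._original_handler
```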
flock/registry.py CHANGED
@@ -25,9 +25,13 @@ class TypeRegistry:

     def register(self, model: type[BaseModel], name: str | None = None) -> str:
         if not issubclass(model, BaseModel):
-            raise RegistryError("Only Pydantic models can be registered as artifact types.")
+            raise RegistryError(
+                "Only Pydantic models can be registered as artifact types."
+            )
         type_name = (
-            name or getattr(model, "__flock_type__", None) or f"{model.__module__}.{model.__name__}"
+            name
+            or getattr(model, "__flock_type__", None)
+            or f"{model.__module__}.{model.__name__}"
         )
         existing_model = self._by_name.get(type_name)
         if existing_model is not None and existing_model is not model:
@@ -127,7 +131,9 @@ def flock_type(model: type[BaseModel] | None = None, *, name: str | None = None)
     return _wrap(model)


-def flock_tool(func: Callable[..., Any] | None = None, *, name: str | None = None) -> Any:
+def flock_tool(
+    func: Callable[..., Any] | None = None, *, name: str | None = None
+) -> Any:
     """Decorator to register a deterministic helper function for agents."""

     def _wrap(callable_: Callable[..., Any]) -> Callable[..., Any]:
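Based only on the signatures visible in this hunk, `flock_type(model=None, *, name=None)` registers a Pydantic model as an artifact type and `flock_tool(func=None, *, name=None)` registers a deterministic helper, with the registry falling back to `"module.ClassName"` (or `__flock_type__`) when no name is given. The sketch below is a hypothetical usage example built on those signatures; the model, the helper, the explicit names, and the decorator-with-arguments form are assumptions drawn from the standard optional-argument decorator pattern shown above.

```python
from pydantic import BaseModel

from flock.registry import flock_tool, flock_type


@flock_type(name="demo.Task")       # explicit name; otherwise the registry would
class Task(BaseModel):              # derive something like "my_module.Task"
    name: str
    priority: int = 0


@flock_tool(name="word_count")
def word_count(text: str) -> int:
    """Deterministic helper an agent could call."""
    return len(text.split())
```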