flock-core 0.5.10__py3-none-any.whl → 0.5.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic. Click here for more details.

Files changed (91) hide show
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/api/models.py +283 -0
  11. flock/{service.py → api/service.py} +121 -63
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/graph_builder.py +7 -7
  31. flock/dashboard/routes/__init__.py +21 -0
  32. flock/dashboard/routes/control.py +327 -0
  33. flock/dashboard/routes/helpers.py +340 -0
  34. flock/dashboard/routes/themes.py +76 -0
  35. flock/dashboard/routes/traces.py +521 -0
  36. flock/dashboard/routes/websocket.py +108 -0
  37. flock/dashboard/service.py +44 -1294
  38. flock/engines/dspy/__init__.py +20 -0
  39. flock/engines/dspy/artifact_materializer.py +216 -0
  40. flock/engines/dspy/signature_builder.py +474 -0
  41. flock/engines/dspy/streaming_executor.py +858 -0
  42. flock/engines/dspy_engine.py +45 -1330
  43. flock/engines/examples/simple_batch_engine.py +2 -2
  44. flock/examples.py +7 -7
  45. flock/logging/logging.py +1 -16
  46. flock/models/__init__.py +10 -0
  47. flock/models/system_artifacts.py +33 -0
  48. flock/orchestrator/__init__.py +45 -0
  49. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  50. flock/orchestrator/artifact_manager.py +168 -0
  51. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  52. flock/orchestrator/component_runner.py +389 -0
  53. flock/orchestrator/context_builder.py +167 -0
  54. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  55. flock/orchestrator/event_emitter.py +167 -0
  56. flock/orchestrator/initialization.py +184 -0
  57. flock/orchestrator/lifecycle_manager.py +226 -0
  58. flock/orchestrator/mcp_manager.py +202 -0
  59. flock/orchestrator/scheduler.py +189 -0
  60. flock/orchestrator/server_manager.py +234 -0
  61. flock/orchestrator/tracing.py +147 -0
  62. flock/storage/__init__.py +10 -0
  63. flock/storage/artifact_aggregator.py +158 -0
  64. flock/storage/in_memory/__init__.py +6 -0
  65. flock/storage/in_memory/artifact_filter.py +114 -0
  66. flock/storage/in_memory/history_aggregator.py +115 -0
  67. flock/storage/sqlite/__init__.py +10 -0
  68. flock/storage/sqlite/agent_history_queries.py +154 -0
  69. flock/storage/sqlite/consumption_loader.py +100 -0
  70. flock/storage/sqlite/query_builder.py +112 -0
  71. flock/storage/sqlite/query_params_builder.py +91 -0
  72. flock/storage/sqlite/schema_manager.py +168 -0
  73. flock/storage/sqlite/summary_queries.py +194 -0
  74. flock/utils/__init__.py +14 -0
  75. flock/utils/async_utils.py +67 -0
  76. flock/{runtime.py → utils/runtime.py} +3 -3
  77. flock/utils/time_utils.py +53 -0
  78. flock/utils/type_resolution.py +38 -0
  79. flock/{utilities.py → utils/utilities.py} +2 -2
  80. flock/utils/validation.py +57 -0
  81. flock/utils/visibility.py +79 -0
  82. flock/utils/visibility_utils.py +134 -0
  83. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/METADATA +69 -61
  84. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -31
  85. flock/agent.py +0 -1578
  86. flock/orchestrator.py +0 -1746
  87. /flock/{visibility.py → core/visibility.py} +0 -0
  88. /flock/{helper → utils}/cli_helper.py +0 -0
  89. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
  90. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
  91. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,1102 @@
1
+ """Blackboard orchestrator and scheduling runtime."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ import logging
7
+ import os
8
+ from asyncio import Task
9
+ from collections.abc import AsyncGenerator, Iterable, Mapping, Sequence
10
+ from contextlib import asynccontextmanager
11
+ from datetime import UTC, datetime
12
+ from typing import TYPE_CHECKING, Any
13
+
14
+ from pydantic import BaseModel
15
+
16
+ from flock.components.orchestrator import (
17
+ CollectionResult,
18
+ OrchestratorComponent,
19
+ ScheduleDecision,
20
+ )
21
+ from flock.core.agent import Agent, AgentBuilder
22
+ from flock.core.artifacts import Artifact
23
+ from flock.core.store import BlackboardStore, ConsumptionRecord
24
+ from flock.core.subscription import Subscription
25
+ from flock.core.visibility import PublicVisibility, Visibility
26
+ from flock.logging.auto_trace import AutoTracedMeta
27
+ from flock.mcp import (
28
+ FlockMCPClientManager,
29
+ FlockMCPConfiguration,
30
+ ServerParameters,
31
+ )
32
+ from flock.orchestrator import (
33
+ AgentScheduler,
34
+ ArtifactManager,
35
+ ComponentRunner,
36
+ OrchestratorInitializer,
37
+ ServerManager,
38
+ TracingManager,
39
+ )
40
+ from flock.registry import type_registry
41
+
42
+
43
+ if TYPE_CHECKING:
44
+ import builtins
45
+
46
+
47
class BoardHandle:
    """Restricted facade over the orchestrator's blackboard.

    Components receive this handle instead of the full ``Flock`` instance,
    so they can publish and inspect artifacts without touching scheduling
    internals.
    """

    def __init__(self, orchestrator: Flock) -> None:
        # Back-reference; every operation delegates to the orchestrator.
        self._orchestrator = orchestrator

    async def publish(self, artifact: Artifact) -> None:
        """Persist *artifact* and let the orchestrator schedule consumers."""
        await self._orchestrator._persist_and_schedule(artifact)

    async def get(self, artifact_id) -> Artifact | None:
        """Fetch one artifact by id (``None`` when the store has no match)."""
        store = self._orchestrator.store
        return await store.get(artifact_id)

    async def list(self) -> builtins.list[Artifact]:
        """Return every artifact currently held by the store."""
        store = self._orchestrator.store
        return await store.list()
61
+
62
+
63
+ class Flock(metaclass=AutoTracedMeta):
64
+ """Main orchestrator for blackboard-based agent coordination.
65
+
66
+ All public methods are automatically traced via OpenTelemetry.
67
+ """
68
+
69
+ def _patch_litellm_proxy_imports(self) -> None:
70
+ """Stub litellm proxy_server to avoid optional proxy deps when not used.
71
+
72
+ Some litellm versions import `litellm.proxy.proxy_server` during standard logging
73
+ to read `general_settings`, which pulls in optional dependencies like `apscheduler`.
74
+ We provide a stub so imports succeed but cold storage remains disabled.
75
+ """
76
+ try:
77
+ import sys
78
+ import types
79
+
80
+ if "litellm.proxy.proxy_server" not in sys.modules:
81
+ stub = types.ModuleType("litellm.proxy.proxy_server")
82
+ # Minimal surface that cold_storage_handler accesses
83
+ stub.general_settings = {}
84
+ sys.modules["litellm.proxy.proxy_server"] = stub
85
+ except Exception: # nosec B110 - Safe to ignore; worst case litellm will log a warning
86
+ # logger.debug(f"Failed to stub litellm proxy_server: {e}")
87
+ pass
88
+
89
+ def __init__(
90
+ self,
91
+ model: str | None = None,
92
+ *,
93
+ store: BlackboardStore | None = None,
94
+ max_agent_iterations: int = 1000,
95
+ context_provider: Any = None,
96
+ ) -> None:
97
+ """Initialize the Flock orchestrator for blackboard-based agent coordination.
98
+
99
+ Phase 3: Simplified using OrchestratorInitializer module.
100
+
101
+ Args:
102
+ model: Default LLM model for agents
103
+ store: Custom blackboard storage backend
104
+ max_agent_iterations: Circuit breaker limit
105
+ context_provider: Global context provider for all agents
106
+
107
+ Examples:
108
+ >>> flock = Flock("openai/gpt-4.1")
109
+ >>> flock = Flock("openai/gpt-4o", store=CustomStore())
110
+ """
111
+ # Patch litellm imports and setup logger
112
+ self._patch_litellm_proxy_imports()
113
+ self._logger = logging.getLogger(__name__)
114
+ self.model = model
115
+
116
+ # Phase 3: Initialize all components using OrchestratorInitializer
117
+ components = OrchestratorInitializer.initialize_components(
118
+ store=store,
119
+ context_provider=context_provider,
120
+ max_agent_iterations=max_agent_iterations,
121
+ logger=self._logger,
122
+ model=model,
123
+ )
124
+
125
+ # Assign basic state
126
+ self.store = components["store"]
127
+ self._agents = components["agents"]
128
+ self._lock = components["lock"]
129
+ self.metrics = components["metrics"]
130
+ self._agent_iteration_count = components["agent_iteration_count"]
131
+ self._default_context_provider = context_provider
132
+ self.max_agent_iterations = max_agent_iterations
133
+ self.is_dashboard = False
134
+
135
+ # Assign engines
136
+ self._artifact_collector = components["artifact_collector"]
137
+ self._correlation_engine = components["correlation_engine"]
138
+ self._batch_engine = components["batch_engine"]
139
+
140
+ # Assign Phase 5A modules
141
+ self._context_builder = components["context_builder"]
142
+ self._event_emitter = components["event_emitter"]
143
+ self._lifecycle_manager = components["lifecycle_manager"]
144
+
145
+ # Assign Phase 3 modules
146
+ self._mcp_manager_instance = components["mcp_manager_instance"]
147
+ self._tracing_manager = components["tracing_manager"]
148
+ self._auto_workflow_enabled = components["auto_workflow_enabled"]
149
+
150
+ # WebSocket manager (set by serve())
151
+ self.__websocket_manager = components["websocket_manager"]
152
+
153
+ # Set batch timeout callback
154
+ self._lifecycle_manager.set_batch_timeout_callback(self._check_batch_timeouts)
155
+
156
+ # Background server task for non-blocking serve() (set by ServerManager.serve)
157
+ self._server_task: Task[None] | None = None
158
+ self._dashboard_launcher: Any = None
159
+
160
+ # Initialize components list and built-in components
161
+ self._components: list[OrchestratorComponent] = []
162
+ runner_components = OrchestratorInitializer.initialize_components_and_runner(
163
+ self._components, max_agent_iterations, self._logger
164
+ )
165
+ self._component_runner = runner_components["component_runner"]
166
+
167
+ # Initialize scheduler and artifact manager
168
+ self._scheduler = AgentScheduler(self, self._component_runner)
169
+ self._artifact_manager = ArtifactManager(self, self.store, self._scheduler)
170
+
171
+ # Resolve model default
172
+ if not model:
173
+ self.model = os.getenv("DEFAULT_MODEL")
174
+
175
+ # Log initialization
176
+ self._logger.debug("Orchestrator initialized: components=[]")
177
+
178
+ # Agent management -----------------------------------------------------
179
+
180
+ def agent(self, name: str) -> AgentBuilder:
181
+ """Create a new agent using the fluent builder API.
182
+
183
+ Args:
184
+ name: Unique identifier for the agent. Used for visibility controls and metrics.
185
+
186
+ Returns:
187
+ AgentBuilder for fluent configuration
188
+
189
+ Raises:
190
+ ValueError: If an agent with this name already exists
191
+
192
+ Examples:
193
+ >>> # Basic agent
194
+ >>> pizza_agent = (
195
+ ... flock.agent("pizza_master")
196
+ ... .description("Creates delicious pizza recipes")
197
+ ... .consumes(DreamPizza)
198
+ ... .publishes(Pizza)
199
+ ... )
200
+
201
+ >>> # Advanced agent with filtering
202
+ >>> critic = (
203
+ ... flock.agent("critic")
204
+ ... .consumes(Movie, where=lambda m: m.rating >= 8)
205
+ ... .publishes(Review)
206
+ ... .with_utilities(RateLimiter(max_calls=10))
207
+ ... )
208
+ """
209
+ if name in self._agents:
210
+ raise ValueError(f"Agent '{name}' already registered.")
211
+ return AgentBuilder(self, name)
212
+
213
+ def register_agent(self, agent: Agent) -> None:
214
+ if agent.name in self._agents:
215
+ raise ValueError(f"Agent '{agent.name}' already registered.")
216
+ self._agents[agent.name] = agent
217
+
218
+ def get_agent(self, name: str) -> Agent:
219
+ return self._agents[name]
220
+
221
+ @property
222
+ def agents(self) -> list[Agent]:
223
+ return list(self._agents.values())
224
+
225
+ async def get_correlation_status(self, correlation_id: str) -> dict[str, Any]:
226
+ """Get the status of a workflow by correlation ID.
227
+
228
+ Args:
229
+ correlation_id: The correlation ID to check
230
+
231
+ Returns:
232
+ Dictionary containing workflow status information:
233
+ - state: "active" if work is pending, "completed" otherwise
234
+ - has_pending_work: True if orchestrator has pending work for this correlation
235
+ - artifact_count: Total number of artifacts with this correlation_id
236
+ - error_count: Number of WorkflowError artifacts
237
+ - started_at: Timestamp of first artifact (if any)
238
+ - last_activity_at: Timestamp of most recent artifact (if any)
239
+ """
240
+ from uuid import UUID
241
+
242
+ try:
243
+ correlation_uuid = UUID(correlation_id)
244
+ except ValueError as exc:
245
+ raise ValueError(
246
+ f"Invalid correlation_id format: {correlation_id}"
247
+ ) from exc
248
+
249
+ # Check if orchestrator has pending work for this correlation
250
+ # 1. Check active tasks for this correlation_id
251
+ has_active_tasks = (
252
+ correlation_uuid in self._scheduler._correlation_tasks
253
+ and bool(self._scheduler._correlation_tasks[correlation_uuid])
254
+ )
255
+
256
+ # 2. Check correlation groups (for agents with JoinSpec that haven't yielded yet)
257
+ has_pending_groups = False
258
+ for groups in self._correlation_engine.correlation_groups.values():
259
+ for group_key, group in groups.items():
260
+ # Check if this group belongs to our correlation
261
+ for type_name, artifacts in group.waiting_artifacts.items():
262
+ if any(
263
+ artifact.correlation_id == correlation_uuid
264
+ for artifact in artifacts
265
+ ):
266
+ has_pending_groups = True
267
+ break
268
+ if has_pending_groups:
269
+ break
270
+ if has_pending_groups:
271
+ break
272
+
273
+ # Workflow has pending work if EITHER tasks are active OR groups are waiting
274
+ has_pending_work = has_active_tasks or has_pending_groups
275
+
276
+ # Query artifacts for this correlation
277
+ from flock.core.store import FilterConfig
278
+
279
+ filters = FilterConfig(correlation_id=correlation_id)
280
+ artifacts, total = await self.store.query_artifacts(
281
+ filters, limit=1000, offset=0
282
+ )
283
+
284
+ # Count errors - use registry to get correct type name after Phase 8 refactor
285
+ from flock.models.system_artifacts import WorkflowError
286
+
287
+ workflow_error_type = type_registry.name_for(WorkflowError)
288
+ error_count = sum(
289
+ 1 for artifact in artifacts if artifact.type == workflow_error_type
290
+ )
291
+
292
+ # Get timestamps
293
+ started_at = None
294
+ last_activity_at = None
295
+ if artifacts:
296
+ timestamps = [artifact.created_at for artifact in artifacts]
297
+ started_at = min(timestamps).isoformat()
298
+ last_activity_at = max(timestamps).isoformat()
299
+
300
+ # Determine state
301
+ if has_pending_work:
302
+ state = "active"
303
+ elif total == 0:
304
+ state = "not_found"
305
+ elif error_count > 0 and total == error_count:
306
+ state = "failed" # Only error artifacts exist
307
+ else:
308
+ state = "completed"
309
+
310
+ return {
311
+ "correlation_id": correlation_id,
312
+ "state": state,
313
+ "has_pending_work": has_pending_work,
314
+ "artifact_count": total,
315
+ "error_count": error_count,
316
+ "started_at": started_at,
317
+ "last_activity_at": last_activity_at,
318
+ }
319
+
320
+ # Phase 5A: WebSocket manager property (auto-updates event emitter)
321
+
322
    @property
    def _websocket_manager(self) -> Any:
        """Get the WebSocket manager for dashboard events.

        Backed by the name-mangled ``__websocket_manager`` attribute; always
        assign through the matching setter so the EventEmitter stays in sync.
        """
        return self.__websocket_manager
326
+
327
    @_websocket_manager.setter
    def _websocket_manager(self, value: Any) -> None:
        """Set the WebSocket manager and propagate to EventEmitter."""
        self.__websocket_manager = value
        # Keep the event emitter pointed at the live manager so dashboard
        # events reach connected clients.
        self._event_emitter.set_websocket_manager(value)
332
+
333
+ # Component management -------------------------------------------------
334
+
335
+ def add_component(self, component: OrchestratorComponent) -> Flock:
336
+ """Add an OrchestratorComponent to this orchestrator.
337
+
338
+ Components execute in priority order (lower priority number = earlier).
339
+ Multiple components can have the same priority.
340
+
341
+ Args:
342
+ component: Component to add (must be an OrchestratorComponent instance)
343
+
344
+ Returns:
345
+ Self for method chaining
346
+
347
+ Examples:
348
+ >>> # Add single component
349
+ >>> flock = Flock("openai/gpt-4.1")
350
+ >>> flock.add_component(CircuitBreakerComponent(max_iterations=500))
351
+
352
+ >>> # Method chaining
353
+ >>> flock.add_component(CircuitBreakerComponent()) \\
354
+ ... .add_component(MetricsComponent()) \\
355
+ ... .add_component(DeduplicationComponent())
356
+
357
+ >>> # Custom priority (lower = earlier)
358
+ >>> flock.add_component(
359
+ ... CustomComponent(priority=5, name="early_component")
360
+ ... )
361
+ """
362
+ self._components.append(component)
363
+ self._components.sort(key=lambda c: c.priority)
364
+
365
+ # Phase 3: Update ComponentRunner with new sorted components
366
+ self._component_runner = ComponentRunner(self._components, self._logger)
367
+
368
+ # Log component addition
369
+ comp_name = component.name or component.__class__.__name__
370
+ self._logger.info(
371
+ f"Component added: name={comp_name}, "
372
+ f"priority={component.priority}, total_components={len(self._components)}"
373
+ )
374
+
375
+ return self
376
+
377
+ # MCP management - Phase 3 extracted to MCPManager -------------------------------------------------------
378
+
379
+ def add_mcp(
380
+ self,
381
+ name: str,
382
+ connection_params: ServerParameters,
383
+ *,
384
+ enable_tools_feature: bool = True,
385
+ enable_prompts_feature: bool = True,
386
+ enable_sampling_feature: bool = True,
387
+ enable_roots_feature: bool = True,
388
+ mount_points: list[str] | None = None,
389
+ tool_whitelist: list[str] | None = None,
390
+ read_timeout_seconds: float = 300,
391
+ max_retries: int = 3,
392
+ **kwargs,
393
+ ) -> Flock:
394
+ """Register an MCP server for use by agents.
395
+
396
+ Architecture Decision: AD001 - Two-Level Architecture
397
+ MCP servers are registered at orchestrator level and assigned to agents.
398
+
399
+ Args:
400
+ name: Unique identifier for this MCP server
401
+ connection_params: Server connection parameters
402
+ enable_tools_feature: Enable tool execution
403
+ enable_prompts_feature: Enable prompt templates
404
+ enable_sampling_feature: Enable LLM sampling requests
405
+ enable_roots_feature: Enable filesystem roots
406
+ tool_whitelist: Optional list of tool names to allow
407
+ read_timeout_seconds: Timeout for server communications
408
+ max_retries: Connection retry attempts
409
+
410
+ Returns:
411
+ self for method chaining
412
+
413
+ Raises:
414
+ ValueError: If server name already registered
415
+ """
416
+ # Phase 3: Delegate to MCPManager
417
+ self._mcp_manager_instance.add_mcp(
418
+ name,
419
+ connection_params,
420
+ enable_tools_feature=enable_tools_feature,
421
+ enable_prompts_feature=enable_prompts_feature,
422
+ enable_sampling_feature=enable_sampling_feature,
423
+ enable_roots_feature=enable_roots_feature,
424
+ mount_points=mount_points,
425
+ tool_whitelist=tool_whitelist,
426
+ read_timeout_seconds=read_timeout_seconds,
427
+ max_retries=max_retries,
428
+ **kwargs,
429
+ )
430
+ return self
431
+
432
    def get_mcp_manager(self) -> FlockMCPClientManager:
        """Get or create the MCP client manager.

        Architecture Decision: AD005 - Lazy Connection Establishment

        Returns:
            The shared FlockMCPClientManager owned by the MCPManager.
        """
        # Phase 3: Delegate to MCPManager
        return self._mcp_manager_instance.get_mcp_manager()
439
+
440
    @property
    def _mcp_configs(self) -> dict[str, FlockMCPConfiguration]:
        """Get the dictionary of MCP configurations (Phase 3: delegated to MCPManager).

        NOTE(review): presumably keyed by the server name passed to
        :meth:`add_mcp` — confirm against MCPManager.
        """
        return self._mcp_manager_instance.configs
444
+
445
    @property
    def _mcp_manager(self) -> FlockMCPClientManager | None:
        """Get the MCP manager instance.

        Reads the underlying client manager directly; ``None`` until lazy
        creation has run (see :meth:`get_mcp_manager`) — NOTE(review):
        inferred from the lazy-connection note there; confirm.
        """
        return self._mcp_manager_instance._client_manager
449
+
450
+ # Unified Tracing - Phase 3: Delegated to TracingManager --------------
451
+
452
    @property
    def _workflow_span(self) -> Any:
        """Get current workflow span (for backwards compatibility with tests)."""
        # Delegates to the TracingManager, which owns span lifecycle.
        return self._tracing_manager.current_workflow_span
456
+
457
    @asynccontextmanager
    async def traced_run(self, name: str = "workflow") -> AsyncGenerator[Any, None]:
        """Context manager for wrapping an entire execution in a single unified trace.

        Phase 3: Delegates to TracingManager module.

        Args:
            name: Name for the workflow trace (default: "workflow")

        Yields:
            The workflow span for optional manual attribute setting

        Examples:
            async with flock.traced_run("pizza_workflow"):
                await flock.publish(pizza_idea)
                await flock.run_until_idle()
        """
        # id(self) serves as a per-process identifier for this orchestrator
        # instance on the trace, distinguishing multiple Flock objects.
        async with self._tracing_manager.traced_run(
            name=name, flock_id=str(id(self))
        ) as span:
            yield span
478
+
479
    @staticmethod
    def clear_traces(db_path: str = ".flock/traces.duckdb") -> dict[str, Any]:
        """Clear all traces from the DuckDB database.

        Phase 3: Delegates to TracingManager module.

        Args:
            db_path: Path to the DuckDB database file

        Returns:
            Dictionary with operation results (deleted_count, success, error)

        Examples:
            result = Flock.clear_traces()
            print(f"Deleted {result['deleted_count']} spans")
        """
        # Static on purpose: touches only the on-disk trace DB, never
        # orchestrator state, so no instance is required.
        return TracingManager.clear_traces(db_path)
496
+
497
+ # Runtime --------------------------------------------------------------
498
+
499
    async def run_until_idle(self) -> None:
        """Wait for all scheduled agent tasks to complete.

        This method blocks until the blackboard reaches a stable state where no
        agents are queued for execution. Essential for batch processing and ensuring
        all agent cascades complete before continuing.

        Note:
            Automatically resets circuit breaker counters and shuts down MCP connections
            when idle. Used with publish() for event-driven workflows.

        Examples:
            >>> # Event-driven workflow (recommended)
            >>> await flock.publish(task1)
            >>> await flock.publish(task2)
            >>> await flock.run_until_idle()  # Wait for all cascades
            >>> # All agents have finished processing

            >>> # Parallel batch processing
            >>> await flock.publish_many([task1, task2, task3])
            >>> await flock.run_until_idle()  # All tasks processed in parallel

        See Also:
            - publish(): Event-driven artifact publishing
            - publish_many(): Batch publishing for parallel execution
            - invoke(): Direct agent invocation without cascade
        """
        # Busy-poll at 10ms intervals: agents may schedule new tasks while
        # earlier ones run, so the pending set is re-read each iteration.
        while self._scheduler.pending_tasks:
            await asyncio.sleep(0.01)
            pending = {
                task for task in self._scheduler.pending_tasks if not task.done()
            }
            # Prune completed tasks by writing back to the scheduler's
            # (private) task set — NOTE(review): reaches into _tasks directly.
            self._scheduler._tasks = pending

        # Phase 5A: Check for pending work using LifecycleManager properties
        pending_batches = self._lifecycle_manager.has_pending_batches
        pending_correlations = self._lifecycle_manager.has_pending_correlations

        # Ensure watchdog loops remain active while pending work exists.
        if pending_batches:
            await self._lifecycle_manager.start_batch_timeout_checker()

        if pending_correlations:
            await self._lifecycle_manager.start_correlation_cleanup()

        # If deferred work is still outstanding, consider the orchestrator quiescent for
        # now but leave watchdog tasks running to finish the job.
        if pending_batches or pending_correlations:
            self._agent_iteration_count.clear()
            return

        # Notify components that orchestrator reached idle state
        if self._component_runner.is_initialized:
            await self._component_runner.run_idle(self)

        # T068: Reset circuit breaker counters when idle
        self._agent_iteration_count.clear()

        # Automatically shutdown MCP connections when idle; component hooks
        # are deliberately skipped so state survives between cascades.
        await self.shutdown(include_components=False)
559
+
560
+ async def direct_invoke(
561
+ self, agent: Agent, inputs: Sequence[BaseModel | Mapping[str, Any] | Artifact]
562
+ ) -> list[Artifact]:
563
+ artifacts = [
564
+ self._normalize_input(value, produced_by="__direct__") for value in inputs
565
+ ]
566
+ for artifact in artifacts:
567
+ self._mark_processed(artifact, agent)
568
+ await self._persist_and_schedule(artifact)
569
+
570
+ # Phase 5A: Use ContextBuilder to create execution context (consolidates duplicated pattern)
571
+ # This implements the security boundary pattern (Phase 8 security fix)
572
+ ctx = await self._context_builder.build_execution_context(
573
+ agent=agent,
574
+ artifacts=artifacts,
575
+ correlation_id=artifacts[0].correlation_id
576
+ if artifacts and artifacts[0].correlation_id
577
+ else None,
578
+ is_batch=False,
579
+ )
580
+ self._record_agent_run(agent)
581
+ return await agent.execute(ctx, artifacts)
582
+
583
+ async def arun(
584
+ self, agent_builder: AgentBuilder, *inputs: BaseModel
585
+ ) -> list[Artifact]:
586
+ """Execute an agent with inputs and wait for all cascades to complete (async).
587
+
588
+ Convenience method that combines direct agent invocation with run_until_idle().
589
+ Useful for testing and synchronous request-response patterns.
590
+
591
+ Args:
592
+ agent_builder: Agent to execute (from flock.agent())
593
+ *inputs: Input objects (BaseModel instances)
594
+
595
+ Returns:
596
+ Artifacts produced by the agent and any triggered cascades
597
+
598
+ Examples:
599
+ >>> # Test a single agent
600
+ >>> flock = Flock("openai/gpt-4.1")
601
+ >>> pizza_agent = flock.agent("pizza").consumes(Idea).publishes(Pizza)
602
+ >>> results = await flock.arun(pizza_agent, Idea(topic="Margherita"))
603
+
604
+ >>> # Multiple inputs
605
+ >>> results = await flock.arun(
606
+ ... task_agent, Task(name="deploy"), Task(name="test")
607
+ ... )
608
+
609
+ Note:
610
+ For event-driven workflows, prefer publish() + run_until_idle() for better
611
+ control over execution timing and parallel processing.
612
+ """
613
+ artifacts = await self.direct_invoke(agent_builder.agent, list(inputs))
614
+ await self.run_until_idle()
615
+ return artifacts
616
+
617
+ def run(self, agent_builder: AgentBuilder, *inputs: BaseModel) -> list[Artifact]:
618
+ """Synchronous wrapper for arun() - executes agent and waits for completion.
619
+
620
+ Args:
621
+ agent_builder: Agent to execute (from flock.agent())
622
+ *inputs: Input objects (BaseModel instances)
623
+
624
+ Returns:
625
+ Artifacts produced by the agent and any triggered cascades
626
+
627
+ Examples:
628
+ >>> # Synchronous execution (blocks until complete)
629
+ >>> flock = Flock("openai/gpt-4o-mini")
630
+ >>> agent = flock.agent("analyzer").consumes(Data).publishes(Report)
631
+ >>> results = flock.run(agent, Data(value=42))
632
+
633
+ Warning:
634
+ Cannot be called from within an async context. Use arun() instead
635
+ if already in an async function.
636
+ """
637
+ return asyncio.run(self.arun(agent_builder, *inputs))
638
+
639
    async def shutdown(self, *, include_components: bool = True) -> None:
        """Shutdown orchestrator and clean up resources.

        Cleanup order: component hooks (optional) → background server task →
        lifecycle manager → MCP connections.

        Args:
            include_components: Whether to invoke component shutdown hooks.
                Internal callers (e.g., run_until_idle) disable this to avoid
                tearing down component state between cascades.
        """
        if include_components and self._component_runner.is_initialized:
            await self._component_runner.run_shutdown(self)

        # Cancel background server task if running (non-blocking serve)
        if self._server_task and not self._server_task.done():
            self._server_task.cancel()
            try:
                await self._server_task
            except asyncio.CancelledError:
                # Expected: the task was cancelled by us just above.
                pass
            # Note: _cleanup_server_callback will handle launcher.stop()

        # Phase 5A: Delegate lifecycle cleanup to LifecycleManager
        await self._lifecycle_manager.shutdown()

        # Phase 3: Delegate MCP cleanup to MCPManager
        await self._mcp_manager_instance.cleanup()
664
+
665
    def cli(self) -> Flock:
        """Fluent no-op; reserved for future CLI wiring (rich UI in Step 3)."""
        # Placeholder for CLI wiring (rich UI in Step 3)
        return self
668
+
669
    async def serve(
        self,
        *,
        dashboard: bool = False,
        dashboard_v2: bool = False,
        host: str = "127.0.0.1",
        port: int = 8344,
        blocking: bool = True,
    ) -> Task[None] | None:
        """Start HTTP service for the orchestrator.

        Phase 3: Delegates to ServerManager module.

        Args:
            dashboard: Enable real-time dashboard with WebSocket support
            dashboard_v2: Launch the new dashboard v2 frontend (implies dashboard=True)
            host: Host to bind to (default: "127.0.0.1")
            port: Port to bind to (default: 8344)
            blocking: If True, blocks until server stops. If False, starts server
                in background and returns task handle (default: True)

        Returns:
            None if blocking=True, or Task handle if blocking=False

        Examples:
            await orchestrator.serve()
            await orchestrator.serve(dashboard=True)

            # Non-blocking mode
            task = await orchestrator.serve(dashboard=True, blocking=False)
            await orchestrator.publish(my_message)
            await orchestrator.run_until_idle()
        """
        # Side effect: ServerManager.serve records the background task on
        # self._server_task (see __init__), which shutdown() later cancels.
        return await ServerManager.serve(
            self,
            dashboard=dashboard,
            dashboard_v2=dashboard_v2,
            host=host,
            port=port,
            blocking=blocking,
        )
710
+
711
+ # Scheduling -----------------------------------------------------------
712
+
713
+ async def publish(
714
+ self,
715
+ obj: BaseModel | dict | Artifact,
716
+ *,
717
+ visibility: Visibility | None = None,
718
+ correlation_id: str | None = None,
719
+ partition_key: str | None = None,
720
+ tags: set[str] | None = None,
721
+ is_dashboard: bool = False,
722
+ ) -> Artifact:
723
+ """Publish an artifact to the blackboard (event-driven).
724
+
725
+ Delegates to ArtifactManager for normalization and persistence.
726
+ """
727
+ return await self._artifact_manager.publish(
728
+ obj,
729
+ visibility=visibility,
730
+ correlation_id=correlation_id,
731
+ partition_key=partition_key,
732
+ tags=tags,
733
+ is_dashboard=is_dashboard,
734
+ )
735
+
736
    async def publish_many(
        self, objects: Iterable[BaseModel | dict | Artifact], **kwargs: Any
    ) -> list[Artifact]:
        """Publish multiple artifacts at once (event-driven).

        Delegates to ArtifactManager for batch publishing.

        Args:
            objects: Items to publish; each may be a BaseModel, dict, or Artifact.
            **kwargs: Per-artifact options forwarded to the manager
                (same keywords as publish()).
        """
        return await self._artifact_manager.publish_many(objects, **kwargs)
744
+
745
+ # -----------------------------------------------------------------------------
746
+ # NEW DIRECT INVOCATION API - Explicit Control
747
+ # -----------------------------------------------------------------------------
748
+
749
+ async def invoke(
750
+ self,
751
+ agent: Agent | AgentBuilder,
752
+ obj: BaseModel,
753
+ *,
754
+ publish_outputs: bool = True,
755
+ timeout: float | None = None,
756
+ ) -> list[Artifact]:
757
+ """Directly invoke a specific agent (bypasses subscription matching).
758
+
759
+ This executes the agent immediately without checking subscriptions or
760
+ predicates. Useful for testing or synchronous request-response patterns.
761
+
762
+ Args:
763
+ agent: Agent or AgentBuilder to invoke
764
+ obj: Input object (BaseModel instance)
765
+ publish_outputs: If True, publish outputs to blackboard for cascade
766
+ timeout: Optional timeout in seconds
767
+
768
+ Returns:
769
+ Artifacts produced by the agent
770
+
771
+ Warning:
772
+ This bypasses subscription filters and predicates. For event-driven
773
+ coordination, use publish() instead.
774
+
775
+ Examples:
776
+ >>> # Testing: Execute agent without triggering others
777
+ >>> results = await orchestrator.invoke(
778
+ ... agent, Task(name="test", priority=5), publish_outputs=False
779
+ ... )
780
+
781
+ >>> # HTTP endpoint: Execute specific agent, allow cascade
782
+ >>> results = await orchestrator.invoke(
783
+ ... movie_agent, Idea(topic="AI", genre="comedy"), publish_outputs=True
784
+ ... )
785
+ >>> await orchestrator.run_until_idle()
786
+ """
787
+ from asyncio import wait_for
788
+
789
+ # Get Agent instance
790
+ agent_obj = agent.agent if isinstance(agent, AgentBuilder) else agent
791
+
792
+ # Create artifact (don't publish to blackboard yet)
793
+ type_name = type_registry.name_for(type(obj))
794
+ artifact = Artifact(
795
+ type=type_name,
796
+ payload=obj.model_dump(),
797
+ produced_by="__direct__",
798
+ visibility=PublicVisibility(),
799
+ )
800
+
801
+ # Phase 5A: Use ContextBuilder to create execution context (consolidates duplicated pattern)
802
+ # This implements the security boundary pattern (Phase 8 security fix)
803
+ ctx = await self._context_builder.build_execution_context(
804
+ agent=agent_obj,
805
+ artifacts=[artifact],
806
+ correlation_id=artifact.correlation_id if artifact.correlation_id else None,
807
+ is_batch=False,
808
+ )
809
+ self._record_agent_run(agent_obj)
810
+
811
+ # Execute with optional timeout
812
+ if timeout:
813
+ execution = agent_obj.execute(ctx, [artifact])
814
+ outputs = await wait_for(execution, timeout=timeout)
815
+ else:
816
+ outputs = await agent_obj.execute(ctx, [artifact])
817
+
818
+ # Phase 6: Orchestrator publishes outputs (security fix)
819
+ # Agents return artifacts, orchestrator validates and publishes
820
+ if publish_outputs:
821
+ for output in outputs:
822
+ await self._persist_and_schedule(output)
823
+
824
+ return outputs
825
+
826
+ async def _persist_and_schedule(self, artifact: Artifact) -> None:
827
+ """Delegate to ArtifactManager."""
828
+ await self._artifact_manager.persist_and_schedule(artifact)
829
+
830
+ # Component Hook Delegation ───
831
+
832
+ async def _run_initialize(self) -> None:
833
+ """Delegate to ComponentRunner module."""
834
+ await self._component_runner.run_initialize(self)
835
+
836
+ async def _run_artifact_published(self, artifact: Artifact) -> Artifact | None:
837
+ """Delegate to ComponentRunner module."""
838
+ return await self._component_runner.run_artifact_published(self, artifact)
839
+
840
+ async def _run_before_schedule(
841
+ self, artifact: Artifact, agent: Agent, subscription: Subscription
842
+ ) -> ScheduleDecision:
843
+ """Delegate to ComponentRunner module."""
844
+ return await self._component_runner.run_before_schedule(
845
+ self, artifact, agent, subscription
846
+ )
847
+
848
+ async def _run_collect_artifacts(
849
+ self, artifact: Artifact, agent: Agent, subscription: Subscription
850
+ ) -> CollectionResult:
851
+ """Delegate to ComponentRunner module."""
852
+ return await self._component_runner.run_collect_artifacts(
853
+ self, artifact, agent, subscription
854
+ )
855
+
856
+ async def _run_before_agent_schedule(
857
+ self, agent: Agent, artifacts: list[Artifact]
858
+ ) -> list[Artifact] | None:
859
+ """Delegate to ComponentRunner module."""
860
+ return await self._component_runner.run_before_agent_schedule(
861
+ self, agent, artifacts
862
+ )
863
+
864
+ async def _run_agent_scheduled(
865
+ self, agent: Agent, artifacts: list[Artifact], task: Task[Any]
866
+ ) -> None:
867
+ """Delegate to ComponentRunner module."""
868
+ await self._component_runner.run_agent_scheduled(self, agent, artifacts, task)
869
+
870
+ async def _run_idle(self) -> None:
871
+ """Delegate to ComponentRunner module."""
872
+ await self._component_runner.run_idle(self)
873
+
874
+ async def _run_shutdown(self) -> None:
875
+ """Delegate to ComponentRunner module."""
876
+ await self._component_runner.run_shutdown(self)
877
+
878
+ @property
879
+ def _components_initialized(self) -> bool:
880
+ """Delegate to ComponentRunner module."""
881
+ return self._component_runner.is_initialized
882
+
883
+ # Scheduling ───────────────────────────────────────────────────
884
+
885
+ async def _schedule_artifact(self, artifact: Artifact) -> None:
886
+ """Delegate to AgentScheduler."""
887
+ await self._scheduler.schedule_artifact(artifact)
888
+
889
+ def _schedule_task(
890
+ self, agent: Agent, artifacts: list[Artifact], is_batch: bool = False
891
+ ) -> Task[Any]:
892
+ """Delegate to AgentScheduler."""
893
+ return self._scheduler.schedule_task(agent, artifacts, is_batch=is_batch)
894
+
895
+ def _record_agent_run(self, agent: Agent) -> None:
896
+ self._scheduler.record_agent_run(agent)
897
+
898
+ def _mark_processed(self, artifact: Artifact, agent: Agent) -> None:
899
+ self._scheduler.mark_processed(artifact, agent)
900
+
901
+ def _seen_before(self, artifact: Artifact, agent: Agent) -> bool:
902
+ return self._scheduler.seen_before(artifact, agent)
903
+
904
    async def _run_agent_task(
        self, agent: Agent, artifacts: list[Artifact], is_batch: bool = False
    ) -> None:
        """Execute one scheduled agent run and publish its outputs.

        Builds the execution context, runs the agent, converts failures into
        WorkflowError artifacts (so downstream agents can react), publishes
        successful outputs, and records which artifacts were consumed.

        Args:
            agent: The agent to execute.
            artifacts: Input artifacts delivered to this run.
            is_batch: Whether this run was triggered by a batch flush.
        """
        # Correlation id is taken from the first input; None when no inputs.
        correlation_id = artifacts[0].correlation_id if artifacts else None

        # Phase 5A: Use ContextBuilder to create execution context (consolidates duplicated pattern)
        # This implements the security boundary pattern (Phase 8 security fix)
        # COMPLEXITY REDUCTION: This reduces _run_agent_task from C(11) to likely B or A
        ctx = await self._context_builder.build_execution_context(
            agent=agent,
            artifacts=artifacts,
            correlation_id=correlation_id,
            is_batch=is_batch,
        )
        self._record_agent_run(agent)

        # Phase 6: Execute agent (returns artifacts, doesn't publish)
        # Wrap in try/catch to handle agent failures gracefully
        try:
            outputs = await agent.execute(ctx, artifacts)
        except asyncio.CancelledError:
            # Re-raise cancellations immediately (shutdown, user cancellation)
            # Do NOT treat these as errors - they're intentional interruptions
            self._logger.debug(
                f"Agent '{agent.name}' task cancelled (task={ctx.task_id})"
            )
            raise  # Propagate cancellation so task.cancelled() == True
        except Exception as exc:
            # Agent already called component.on_error hooks before re-raising
            # Now orchestrator publishes error artifact and continues workflow
            from flock.models.system_artifacts import WorkflowError

            error_artifact_data = WorkflowError(
                failed_agent=agent.name,
                error_type=type(exc).__name__,
                error_message=str(exc),
                timestamp=datetime.now(UTC),
                task_id=ctx.task_id,
            )

            # Build and publish error artifact with correlation_id so the
            # failure stays attached to the originating workflow.
            from flock.core.artifacts import ArtifactSpec

            error_spec = ArtifactSpec.from_model(WorkflowError)
            error_artifact = error_spec.build(
                produced_by=f"orchestrator#{agent.name}",
                data=error_artifact_data.model_dump(),
                correlation_id=correlation_id,
            )

            await self._persist_and_schedule(error_artifact)

            # Log error but don't re-raise - workflow continues
            self._logger.error(
                f"Agent '{agent.name}' failed (task={ctx.task_id}): {exc}",
                exc_info=True,
            )
            return  # Exit early - no outputs to publish

        # Phase 6: Orchestrator publishes outputs (security fix)
        # This fixes Vulnerability #2 (WRITE Bypass) - agents can't bypass validation
        for output in outputs:
            await self._persist_and_schedule(output)

        # Best-effort consumption bookkeeping: stores that don't implement it
        # raise NotImplementedError and are silently skipped.
        if artifacts:
            try:
                timestamp = datetime.now(UTC)
                records = [
                    ConsumptionRecord(
                        artifact_id=artifact.id,
                        consumer=agent.name,
                        run_id=ctx.task_id,
                        correlation_id=str(correlation_id) if correlation_id else None,
                        consumed_at=timestamp,
                    )
                    for artifact in artifacts
                ]
                await self.store.record_consumptions(records)
            except NotImplementedError:
                pass
            except Exception as exc:  # pragma: no cover - defensive logging
                self._logger.exception("Failed to record artifact consumption: %s", exc)
986
+
987
+ # Phase 1.2: Logic Operations Event Emission ----------------------------
988
+ # Phase 5A: Delegated to EventEmitter module
989
+
990
+ async def _emit_correlation_updated_event(
991
+ self, *, agent_name: str, subscription_index: int, artifact: Artifact
992
+ ) -> None:
993
+ """Emit CorrelationGroupUpdatedEvent for real-time dashboard updates.
994
+
995
+ Phase 5A: Delegates to EventEmitter module.
996
+
997
+ Args:
998
+ agent_name: Name of the agent with the JoinSpec subscription
999
+ subscription_index: Index of the subscription in the agent's subscriptions list
1000
+ artifact: The artifact that triggered this update
1001
+ """
1002
+ await self._event_emitter.emit_correlation_updated(
1003
+ correlation_engine=self._correlation_engine,
1004
+ agent_name=agent_name,
1005
+ subscription_index=subscription_index,
1006
+ artifact=artifact,
1007
+ )
1008
+
1009
+ async def _emit_batch_item_added_event(
1010
+ self,
1011
+ *,
1012
+ agent_name: str,
1013
+ subscription_index: int,
1014
+ subscription: Subscription, # noqa: F821
1015
+ artifact: Artifact,
1016
+ ) -> None:
1017
+ """Emit BatchItemAddedEvent for real-time dashboard updates.
1018
+
1019
+ Phase 5A: Delegates to EventEmitter module.
1020
+
1021
+ Args:
1022
+ agent_name: Name of the agent with the BatchSpec subscription
1023
+ subscription_index: Index of the subscription in the agent's subscriptions list
1024
+ subscription: The subscription with BatchSpec configuration
1025
+ artifact: The artifact that triggered this update
1026
+ """
1027
+ await self._event_emitter.emit_batch_item_added(
1028
+ batch_engine=self._batch_engine,
1029
+ agent_name=agent_name,
1030
+ subscription_index=subscription_index,
1031
+ subscription=subscription,
1032
+ artifact=artifact,
1033
+ )
1034
+
1035
+ # Batch Helpers --------------------------------------------------------
1036
+ # Phase 5A: Delegated to LifecycleManager module
1037
+
1038
+ async def _check_batch_timeouts(self) -> None:
1039
+ """Check all batches for timeout expiry and flush expired batches.
1040
+
1041
+ Phase 5A: Delegates to LifecycleManager module.
1042
+ """
1043
+
1044
+ async def schedule_callback(
1045
+ agent_name: str, _subscription_index: int, artifacts: list[Artifact]
1046
+ ) -> None:
1047
+ """Callback to schedule agent task for expired batch."""
1048
+ agent = self._agents.get(agent_name)
1049
+ if agent is not None:
1050
+ self._schedule_task(agent, artifacts, is_batch=True)
1051
+
1052
+ await self._lifecycle_manager.check_batch_timeouts(schedule_callback)
1053
+
1054
+ async def _flush_all_batches(self) -> None:
1055
+ """Flush all partial batches (for shutdown - ensures zero data loss).
1056
+
1057
+ Phase 5A: Delegates to LifecycleManager module.
1058
+ """
1059
+
1060
+ async def schedule_callback(
1061
+ agent_name: str, _subscription_index: int, artifacts: list[Artifact]
1062
+ ) -> None:
1063
+ """Callback to schedule agent task for flushed batch."""
1064
+ agent = self._agents.get(agent_name)
1065
+ if agent is not None:
1066
+ self._schedule_task(agent, artifacts, is_batch=True)
1067
+
1068
+ await self._lifecycle_manager.flush_all_batches(schedule_callback)
1069
+ # Wait for all scheduled tasks to complete
1070
+ await self.run_until_idle()
1071
+
1072
+ # Helpers --------------------------------------------------------------
1073
+
1074
+ def _normalize_input(
1075
+ self, value: BaseModel | Mapping[str, Any] | Artifact, *, produced_by: str
1076
+ ) -> Artifact:
1077
+ if isinstance(value, Artifact):
1078
+ return value
1079
+ if isinstance(value, BaseModel):
1080
+ model_cls = type(value)
1081
+ type_name = type_registry.register(model_cls)
1082
+ payload = value.model_dump()
1083
+ elif isinstance(value, Mapping):
1084
+ if "type" not in value:
1085
+ raise ValueError("Mapping input must contain 'type'.")
1086
+ type_name = value["type"]
1087
+ payload = value.get("payload", {})
1088
+ else: # pragma: no cover - defensive
1089
+ raise TypeError("Unsupported input for direct invoke.")
1090
+ return Artifact(type=type_name, payload=payload, produced_by=produced_by)
1091
+
1092
+
1093
@asynccontextmanager
async def start_orchestrator(orchestrator: Flock):  # pragma: no cover - CLI helper
    """Async context manager that drains the orchestrator on clean exit.

    Yields the orchestrator unchanged; when the managed block completes
    without raising, waits for all scheduled work via ``run_until_idle()``.
    Exceptions from the block propagate without the drain step.
    """
    yield orchestrator
    await orchestrator.run_until_idle()
1100
+
1101
+
1102
# Public API of this module: the orchestrator class and the CLI context helper.
__all__ = ["Flock", "start_orchestrator"]