flock-core 0.5.10-py3-none-any.whl → 0.5.20-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (91)
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/api/models.py +283 -0
  11. flock/{service.py → api/service.py} +121 -63
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/graph_builder.py +7 -7
  31. flock/dashboard/routes/__init__.py +21 -0
  32. flock/dashboard/routes/control.py +327 -0
  33. flock/dashboard/routes/helpers.py +340 -0
  34. flock/dashboard/routes/themes.py +76 -0
  35. flock/dashboard/routes/traces.py +521 -0
  36. flock/dashboard/routes/websocket.py +108 -0
  37. flock/dashboard/service.py +44 -1294
  38. flock/engines/dspy/__init__.py +20 -0
  39. flock/engines/dspy/artifact_materializer.py +216 -0
  40. flock/engines/dspy/signature_builder.py +474 -0
  41. flock/engines/dspy/streaming_executor.py +858 -0
  42. flock/engines/dspy_engine.py +45 -1330
  43. flock/engines/examples/simple_batch_engine.py +2 -2
  44. flock/examples.py +7 -7
  45. flock/logging/logging.py +1 -16
  46. flock/models/__init__.py +10 -0
  47. flock/models/system_artifacts.py +33 -0
  48. flock/orchestrator/__init__.py +45 -0
  49. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  50. flock/orchestrator/artifact_manager.py +168 -0
  51. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  52. flock/orchestrator/component_runner.py +389 -0
  53. flock/orchestrator/context_builder.py +167 -0
  54. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  55. flock/orchestrator/event_emitter.py +167 -0
  56. flock/orchestrator/initialization.py +184 -0
  57. flock/orchestrator/lifecycle_manager.py +226 -0
  58. flock/orchestrator/mcp_manager.py +202 -0
  59. flock/orchestrator/scheduler.py +189 -0
  60. flock/orchestrator/server_manager.py +234 -0
  61. flock/orchestrator/tracing.py +147 -0
  62. flock/storage/__init__.py +10 -0
  63. flock/storage/artifact_aggregator.py +158 -0
  64. flock/storage/in_memory/__init__.py +6 -0
  65. flock/storage/in_memory/artifact_filter.py +114 -0
  66. flock/storage/in_memory/history_aggregator.py +115 -0
  67. flock/storage/sqlite/__init__.py +10 -0
  68. flock/storage/sqlite/agent_history_queries.py +154 -0
  69. flock/storage/sqlite/consumption_loader.py +100 -0
  70. flock/storage/sqlite/query_builder.py +112 -0
  71. flock/storage/sqlite/query_params_builder.py +91 -0
  72. flock/storage/sqlite/schema_manager.py +168 -0
  73. flock/storage/sqlite/summary_queries.py +194 -0
  74. flock/utils/__init__.py +14 -0
  75. flock/utils/async_utils.py +67 -0
  76. flock/{runtime.py → utils/runtime.py} +3 -3
  77. flock/utils/time_utils.py +53 -0
  78. flock/utils/type_resolution.py +38 -0
  79. flock/{utilities.py → utils/utilities.py} +2 -2
  80. flock/utils/validation.py +57 -0
  81. flock/utils/visibility.py +79 -0
  82. flock/utils/visibility_utils.py +134 -0
  83. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/METADATA +69 -61
  84. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -31
  85. flock/agent.py +0 -1578
  86. flock/orchestrator.py +0 -1746
  87. /flock/{visibility.py → core/visibility.py} +0 -0
  88. /flock/{helper → utils}/cli_helper.py +0 -0
  89. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
  90. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
  91. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
flock/orchestrator/mcp_manager.py
@@ -0,0 +1,202 @@
+ """MCP (Model Context Protocol) server management for orchestrator.
+
+ This module handles MCP server registration and client manager lifecycle.
+ Implements lazy connection establishment pattern (AD005).
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+
+ if TYPE_CHECKING:
+     from flock.mcp import (
+         FlockMCPClientManager,
+         FlockMCPConfiguration,
+         ServerParameters,
+     )
+
+
+ class MCPManager:
+     """Manages MCP server registration and client connections.
+
+     Architecture Decision: AD001 - Two-Level Architecture
+     MCP servers are registered at orchestrator level and assigned to agents.
+
+     Architecture Decision: AD005 - Lazy Connection Establishment
+     Connections are established only when get_mcp_manager() is first called.
+
+     Attributes:
+         _configs: Dict mapping server names to their configurations
+         _client_manager: Lazy-initialized MCP client manager instance
+     """
+
+     def __init__(self) -> None:
+         """Initialize the MCP manager with empty configuration."""
+         self._configs: dict[str, FlockMCPConfiguration] = {}
+         self._client_manager: FlockMCPClientManager | None = None
+
+     def add_mcp(
+         self,
+         name: str,
+         connection_params: ServerParameters,
+         *,
+         enable_tools_feature: bool = True,
+         enable_prompts_feature: bool = True,
+         enable_sampling_feature: bool = True,
+         enable_roots_feature: bool = True,
+         mount_points: list[str] | None = None,
+         tool_whitelist: list[str] | None = None,
+         read_timeout_seconds: float = 300,
+         max_retries: int = 3,
+         **kwargs,
+     ) -> None:
+         """Register an MCP server configuration.
+
+         Args:
+             name: Unique identifier for this MCP server
+             connection_params: Server connection parameters
+             enable_tools_feature: Enable tool execution
+             enable_prompts_feature: Enable prompt templates
+             enable_sampling_feature: Enable LLM sampling requests
+             enable_roots_feature: Enable filesystem roots
+             mount_points: Optional list of filesystem mount points
+             tool_whitelist: Optional list of tool names to allow
+             read_timeout_seconds: Timeout for server communications
+             max_retries: Connection retry attempts
+
+         Raises:
+             ValueError: If server name already registered
+         """
+         if name in self._configs:
+             raise ValueError(f"MCP server '{name}' is already registered.")
+
+         # Import configuration types
+         from flock.mcp import (
+             FlockMCPConfiguration,
+             FlockMCPConnectionConfiguration,
+             FlockMCPFeatureConfiguration,
+         )
+
+         # Detect transport type
+         from flock.mcp.types import (
+             SseServerParameters,
+             StdioServerParameters,
+             StreamableHttpServerParameters,
+             WebsocketServerParameters,
+         )
+
+         if isinstance(connection_params, StdioServerParameters):
+             transport_type = "stdio"
+         elif isinstance(connection_params, WebsocketServerParameters):
+             transport_type = "websockets"
+         elif isinstance(connection_params, SseServerParameters):
+             transport_type = "sse"
+         elif isinstance(connection_params, StreamableHttpServerParameters):
+             transport_type = "streamable_http"
+         else:
+             transport_type = "custom"
+
+         # Process mount points (convert paths to URIs)
+         mcp_roots = None
+         if mount_points:
+             from pathlib import Path as PathLib
+
+             from flock.mcp.types import MCPRoot
+
+             mcp_roots = []
+             for path in mount_points:
+                 # Normalize the path
+                 if path.startswith("file://"):
+                     # Already a file URI
+                     uri = path
+                     # Extract path from URI for name
+                     path_str = path.replace("file://", "")
+                 # The test:// path-prefix is used by testing servers such as the mcp-everything server.
+                 elif path.startswith("test://"):
+                     # Already a test URI
+                     uri = path
+                     # Extract path from URI for name
+                     path_str = path.replace("test://", "")
+                 else:
+                     # Convert to absolute path and create URI
+                     abs_path = PathLib(path).resolve()
+                     uri = f"file://{abs_path}"
+                     path_str = str(abs_path)
+
+                 # Extract a meaningful name (last component of path)
+                 name_component = (
+                     PathLib(path_str).name
+                     or path_str.rstrip("/").split("/")[-1]
+                     or "root"
+                 )
+                 mcp_roots.append(MCPRoot(uri=uri, name=name_component))
+
+         # Build configuration
+         connection_config = FlockMCPConnectionConfiguration(
+             max_retries=max_retries,
+             connection_parameters=connection_params,
+             transport_type=transport_type,
+             read_timeout_seconds=read_timeout_seconds,
+             mount_points=mcp_roots,
+         )
+
+         feature_config = FlockMCPFeatureConfiguration(
+             tools_enabled=enable_tools_feature,
+             prompts_enabled=enable_prompts_feature,
+             sampling_enabled=enable_sampling_feature,
+             roots_enabled=enable_roots_feature,
+             tool_whitelist=tool_whitelist,
+         )
+
+         mcp_config = FlockMCPConfiguration(
+             name=name,
+             connection_config=connection_config,
+             feature_config=feature_config,
+         )
+
+         self._configs[name] = mcp_config
+
+     def get_mcp_manager(self) -> FlockMCPClientManager:
+         """Get or create the MCP client manager.
+
+         Architecture Decision: AD005 - Lazy Connection Establishment
+         Connections are established only when this method is first called.
+
+         Returns:
+             FlockMCPClientManager instance
+
+         Raises:
+             RuntimeError: If no MCP servers registered
+         """
+         if not self._configs:
+             raise RuntimeError("No MCP servers registered. Call add_mcp() first.")
+
+         if self._client_manager is None:
+             from flock.mcp import FlockMCPClientManager
+
+             self._client_manager = FlockMCPClientManager(self._configs)
+
+         return self._client_manager
+
+     async def cleanup(self) -> None:
+         """Clean up MCP connections.
+
+         Called during orchestrator shutdown to properly close all MCP connections.
+         """
+         if self._client_manager is not None:
+             await self._client_manager.cleanup_all()
+             self._client_manager = None
+
+     @property
+     def configs(self) -> dict[str, FlockMCPConfiguration]:
+         """Get the dictionary of MCP configurations."""
+         return self._configs
+
+     @property
+     def has_configs(self) -> bool:
+         """Check if any MCP servers are registered."""
+         return bool(self._configs)
+
+
+ __all__ = ["MCPManager"]
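
For orientation, a minimal usage sketch of the new MCPManager (not part of the diff). The StdioServerParameters constructor arguments are an assumption about flock.mcp.types, which this hunk only imports; everything else uses the methods shown above.

# Hypothetical usage sketch for MCPManager; server command and parameter
# signature are illustrative, not taken from this release.
from flock.mcp.types import StdioServerParameters  # assumed constructor signature
from flock.orchestrator.mcp_manager import MCPManager

manager = MCPManager()

# Register a server; mount_points are converted to file:// roots and
# tool_whitelist restricts which of the server's tools are exposed.
manager.add_mcp(
    "docs",
    StdioServerParameters(  # hypothetical arguments
        command="npx",
        args=["-y", "@modelcontextprotocol/server-filesystem", "./docs"],
    ),
    mount_points=["./docs"],
    tool_whitelist=["read_file", "list_directory"],
)

# AD005: no connection is opened until the client manager is first requested.
client_manager = manager.get_mcp_manager()

# During orchestrator shutdown:
# await manager.cleanup()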
flock/orchestrator/scheduler.py
@@ -0,0 +1,189 @@
+ """Agent scheduling engine."""
+
+ from __future__ import annotations
+
+ import asyncio
+ from asyncio import Task
+ from typing import TYPE_CHECKING, Any
+
+ from flock.components.orchestrator import ScheduleDecision
+
+
+ if TYPE_CHECKING:
+     from flock.agent import Agent
+     from flock.core import Flock
+     from flock.core.artifacts import Artifact
+     from flock.core.visibility import AgentIdentity
+     from flock.orchestrator import ComponentRunner
+
+
+ class AgentScheduler:
+     """Schedules agents for execution based on artifact subscriptions.
+
+     Responsibilities:
+     - Match artifacts to agent subscriptions
+     - Run scheduling hooks via ComponentRunner
+     - Create agent execution tasks
+     - Manage task lifecycle
+     - Track processed artifacts for deduplication
+     """
+
+     def __init__(self, orchestrator: Flock, component_runner: ComponentRunner):
+         """Initialize scheduler.
+
+         Args:
+             orchestrator: Flock orchestrator instance
+             component_runner: Runner for executing component hooks
+         """
+         self._orchestrator = orchestrator
+         self._component_runner = component_runner
+         self._tasks: set[Task[Any]] = set()
+         self._processed: set[tuple[str, str]] = set()
+         self._logger = orchestrator._logger
+
+     async def schedule_artifact(self, artifact: Artifact) -> None:
+         """Schedule agents for an artifact using component hooks.
+
+         Args:
+             artifact: Published artifact to match against subscriptions
+         """
+         # Initialize components on first artifact
+         if not self._component_runner.is_initialized:
+             await self._component_runner.run_initialize(self._orchestrator)
+
+         # Component hook - artifact published (can transform or block)
+         artifact = await self._component_runner.run_artifact_published(
+             self._orchestrator, artifact
+         )
+         if artifact is None:
+             return  # Artifact blocked by component
+
+         for agent in self._orchestrator.agents:
+             identity = agent.identity
+             for subscription in agent.subscriptions:
+                 if not subscription.accepts_events():
+                     continue
+
+                 # Check prevent_self_trigger
+                 if agent.prevent_self_trigger and artifact.produced_by == agent.name:
+                     continue  # Skip - agent produced this artifact
+
+                 # Visibility check
+                 if not self._check_visibility(artifact, identity):
+                     continue
+
+                 # Subscription match check
+                 if not subscription.matches(artifact):
+                     continue
+
+                 # Component hook - before schedule (circuit breaker, deduplication)
+                 decision = await self._component_runner.run_before_schedule(
+                     self._orchestrator, artifact, agent, subscription
+                 )
+                 if decision == ScheduleDecision.SKIP:
+                     continue
+                 if decision == ScheduleDecision.DEFER:
+                     continue
+
+                 # Component hook - collect artifacts (AND gates, correlation, batching)
+                 collection = await self._component_runner.run_collect_artifacts(
+                     self._orchestrator, artifact, agent, subscription
+                 )
+                 if not collection.complete:
+                     continue  # Still collecting
+
+                 artifacts = collection.artifacts
+
+                 # Component hook - before agent schedule (final validation)
+                 artifacts = await self._component_runner.run_before_agent_schedule(
+                     self._orchestrator, agent, artifacts
+                 )
+                 if artifacts is None:
+                     continue  # Scheduling blocked
+
+                 # Schedule agent task
+                 is_batch_execution = subscription.batch is not None
+                 task = self.schedule_task(agent, artifacts, is_batch=is_batch_execution)
+
+                 # Component hook - agent scheduled (notification)
+                 await self._component_runner.run_agent_scheduled(
+                     self._orchestrator, agent, artifacts, task
+                 )
+
+     def schedule_task(
+         self, agent: Agent, artifacts: list[Artifact], is_batch: bool = False
+     ) -> Task[Any]:
+         """Schedule agent task and return the task handle.
+
+         Args:
+             agent: Agent to execute
+             artifacts: Input artifacts
+             is_batch: Whether this is batch execution
+
+         Returns:
+             Asyncio task handle
+         """
+         task = asyncio.create_task(
+             self._orchestrator._run_agent_task(agent, artifacts, is_batch=is_batch)
+         )
+         self._tasks.add(task)
+         task.add_done_callback(self._tasks.discard)
+         return task
+
+     def record_agent_run(self, agent: Agent) -> None:
+         """Record agent run metric.
+
+         Args:
+             agent: Agent that ran
+         """
+         self._orchestrator.metrics["agent_runs"] += 1
+
+     def mark_processed(self, artifact: Artifact, agent: Agent) -> None:
+         """Mark artifact as processed by agent.
+
+         Args:
+             artifact: Processed artifact
+             agent: Agent that processed it
+         """
+         key = (str(artifact.id), agent.name)
+         self._processed.add(key)
+
+     def seen_before(self, artifact: Artifact, agent: Agent) -> bool:
+         """Check if artifact was already processed by agent.
+
+         Args:
+             artifact: Artifact to check
+             agent: Agent to check
+
+         Returns:
+             True if already processed
+         """
+         key = (str(artifact.id), agent.name)
+         return key in self._processed
+
+     def _check_visibility(self, artifact: Artifact, identity: AgentIdentity) -> bool:
+         """Check if artifact is visible to agent.
+
+         Args:
+             artifact: Artifact to check
+             identity: Agent identity
+
+         Returns:
+             True if visible
+         """
+         try:
+             return artifact.visibility.allows(identity)
+         except AttributeError:  # pragma: no cover - fallback
+             return True
+
+     @property
+     def pending_tasks(self) -> set[Task[Any]]:
+         """Get set of pending agent tasks.
+
+         Returns:
+             Set of asyncio tasks
+         """
+         return self._tasks
+
+
+ __all__ = ["AgentScheduler"]
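
A short sketch of how this scheduler is meant to be driven (illustrative, not part of the diff). The orchestrator and ComponentRunner instances are assumed to exist elsewhere; only AgentScheduler methods shown above are used.

# Hypothetical driver code around AgentScheduler.
scheduler = AgentScheduler(orchestrator, component_runner)

async def on_publish(artifact):
    # Runs the hook chain: artifact_published -> before_schedule ->
    # collect_artifacts -> before_agent_schedule -> agent_scheduled,
    # spawning one asyncio task per matching (agent, subscription) pair.
    await scheduler.schedule_artifact(artifact)

# Deduplication is keyed on (artifact id, agent name):
#   scheduler.mark_processed(artifact, agent)  # record the pair
#   scheduler.seen_before(artifact, agent)     # True on a repeat delivery
#
# Draining outstanding work before shutdown:
#   await asyncio.gather(*scheduler.pending_tasks)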
flock/orchestrator/server_manager.py
@@ -0,0 +1,234 @@
+ """HTTP server management for orchestrator.
+
+ Handles service startup with optional dashboard integration.
+ Extracted from orchestrator.py to reduce complexity.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from asyncio import Task
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+
+ if TYPE_CHECKING:
+     from flock.core.orchestrator import Flock
+
+
+ class ServerManager:
+     """Manages HTTP service startup for the orchestrator.
+
+     Handles both standard API mode and dashboard mode with WebSocket support.
+     """
+
+     @staticmethod
+     async def serve(
+         orchestrator: Flock,
+         *,
+         dashboard: bool = False,
+         dashboard_v2: bool = False,
+         host: str = "127.0.0.1",
+         port: int = 8344,
+         blocking: bool = True,
+     ) -> Task[None] | None:
+         """Start HTTP service for the orchestrator.
+
+         Args:
+             orchestrator: The Flock orchestrator instance to serve
+             dashboard: Enable real-time dashboard with WebSocket support (default: False)
+             dashboard_v2: Launch the new dashboard v2 frontend (implies dashboard=True)
+             host: Host to bind to (default: "127.0.0.1")
+             port: Port to bind to (default: 8344)
+             blocking: If True, blocks until server stops. If False, starts server
+                 in background and returns task handle (default: True)
+
+         Returns:
+             None if blocking=True, or Task handle if blocking=False
+
+         Examples:
+             # Basic HTTP API (no dashboard) - runs until interrupted
+             await ServerManager.serve(orchestrator)
+
+             # With dashboard (WebSocket + browser launch) - runs until interrupted
+             await ServerManager.serve(orchestrator, dashboard=True)
+
+             # Non-blocking mode - start server in background
+             task = await ServerManager.serve(orchestrator, dashboard=True, blocking=False)
+             # Now you can publish messages and run other logic
+             await orchestrator.publish(my_message)
+             await orchestrator.run_until_idle()
+         """
+         # If non-blocking, start server in background task
+         if not blocking:
+             server_task = asyncio.create_task(
+                 ServerManager._serve_impl(
+                     orchestrator,
+                     dashboard=dashboard,
+                     dashboard_v2=dashboard_v2,
+                     host=host,
+                     port=port,
+                 )
+             )
+             # Add cleanup callback
+             server_task.add_done_callback(
+                 lambda task: ServerManager._cleanup_server_callback(orchestrator, task)
+             )
+             # Store task reference for later cancellation
+             orchestrator._server_task = server_task
+             # Give server a moment to start
+             await asyncio.sleep(0.1)
+             return server_task
+
+         # Blocking mode - run server directly with cleanup
+         try:
+             await ServerManager._serve_impl(
+                 orchestrator,
+                 dashboard=dashboard,
+                 dashboard_v2=dashboard_v2,
+                 host=host,
+                 port=port,
+             )
+         finally:
+             # In blocking mode, manually cleanup dashboard launcher
+             if (
+                 hasattr(orchestrator, "_dashboard_launcher")
+                 and orchestrator._dashboard_launcher is not None
+             ):
+                 orchestrator._dashboard_launcher.stop()
+                 orchestrator._dashboard_launcher = None
+         return None
+
+     @staticmethod
+     def _cleanup_server_callback(orchestrator: Flock, task: Task[None]) -> None:
+         """Cleanup callback when background server task completes."""
+         # Stop dashboard launcher if it was started
+         if (
+             hasattr(orchestrator, "_dashboard_launcher")
+             and orchestrator._dashboard_launcher is not None
+         ):
+             try:
+                 orchestrator._dashboard_launcher.stop()
+             except Exception as e:
+                 orchestrator._logger.warning(f"Failed to stop dashboard launcher: {e}")
+             finally:
+                 orchestrator._dashboard_launcher = None
+
+         # Clear server task reference
+         if hasattr(orchestrator, "_server_task"):
+             orchestrator._server_task = None
+
+         # Log any exceptions from the task
+         try:
+             exc = task.exception()
+             if exc and not isinstance(exc, asyncio.CancelledError):
+                 orchestrator._logger.error(f"Server task failed: {exc}", exc_info=exc)
+         except asyncio.CancelledError:
+             pass  # Normal cancellation
+
+     @staticmethod
+     async def _serve_impl(
+         orchestrator: Flock,
+         *,
+         dashboard: bool = False,
+         dashboard_v2: bool = False,
+         host: str = "127.0.0.1",
+         port: int = 8344,
+     ) -> None:
+         """Internal implementation of serve() - actual server logic."""
+         if dashboard_v2:
+             dashboard = True
+
+         if not dashboard:
+             # Standard service without dashboard
+             await ServerManager._serve_standard(orchestrator, host=host, port=port)
+             return
+
+         # Dashboard mode with WebSocket and event collection
+         await ServerManager._serve_dashboard(
+             orchestrator, dashboard_v2=dashboard_v2, host=host, port=port
+         )
+
+     @staticmethod
+     async def _serve_standard(orchestrator: Flock, *, host: str, port: int) -> None:
+         """Serve standard HTTP API without dashboard.
+
+         Args:
+             orchestrator: The Flock orchestrator instance
+             host: Host to bind to
+             port: Port to bind to
+         """
+         from flock.api.service import BlackboardHTTPService
+
+         service = BlackboardHTTPService(orchestrator)
+         await service.run_async(host=host, port=port)
+
+     @staticmethod
+     async def _serve_dashboard(
+         orchestrator: Flock, *, dashboard_v2: bool, host: str, port: int
+     ) -> None:
+         """Serve HTTP API with dashboard and WebSocket support.
+
+         Args:
+             orchestrator: The Flock orchestrator instance
+             dashboard_v2: Whether to use v2 dashboard frontend
+             host: Host to bind to
+             port: Port to bind to
+         """
+         from flock.core import Agent
+         from flock.dashboard.collector import DashboardEventCollector
+         from flock.dashboard.launcher import DashboardLauncher
+         from flock.dashboard.service import DashboardHTTPService
+         from flock.dashboard.websocket import WebSocketManager
+
+         # Create dashboard components
+         websocket_manager = WebSocketManager()
+         event_collector = DashboardEventCollector(store=orchestrator.store)
+         event_collector.set_websocket_manager(websocket_manager)
+         await event_collector.load_persistent_snapshots()
+
+         # Store collector reference for agents added later
+         orchestrator._dashboard_collector = event_collector
+         # Store websocket manager for real-time event emission (Phase 1.2)
+         orchestrator._websocket_manager = websocket_manager
+         # Phase 5A: Set websocket manager on EventEmitter for dashboard updates
+         orchestrator._event_emitter.set_websocket_manager(websocket_manager)
+
+         # Phase 6+7: Set class-level WebSocket broadcast wrapper (dashboard mode)
+         async def _broadcast_wrapper(event):
+             """Isolated broadcast wrapper - no reference chain to orchestrator."""
+             return await websocket_manager.broadcast(event)
+
+         Agent._websocket_broadcast_global = _broadcast_wrapper
+
+         # Inject event collector into all existing agents
+         for agent in orchestrator._agents.values():
+             # Add dashboard collector with priority ordering handled by agent
+             agent._add_utilities([event_collector])
+
+         # Start dashboard launcher (npm process + browser)
+         launcher_kwargs: dict[str, Any] = {"port": port}
+         if dashboard_v2:
+             dashboard_pkg_dir = Path(__file__).parent.parent / "dashboard"
+             launcher_kwargs["frontend_dir"] = dashboard_pkg_dir.parent / "frontend_v2"
+             launcher_kwargs["static_dir"] = dashboard_pkg_dir / "static_v2"
+
+         launcher = DashboardLauncher(**launcher_kwargs)
+         launcher.start()
+
+         # Create dashboard HTTP service
+         service = DashboardHTTPService(
+             orchestrator=orchestrator,
+             websocket_manager=websocket_manager,
+             event_collector=event_collector,
+             use_v2=dashboard_v2,
+         )
+
+         # Store launcher for cleanup
+         orchestrator._dashboard_launcher = launcher
+
+         # Run service (blocking call)
+         # Note: Cleanup is NOT done here - it's handled by:
+         # - ServerManager.serve() finally block (blocking mode)
+         # - ServerManager._cleanup_server_callback() (non-blocking mode)
+         await service.run_async(host=host, port=port)
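
To tie the two serve modes together, a hedged end-to-end sketch (not part of the diff). It assumes `flock` is an already-configured Flock orchestrator and `my_message` a publishable artifact, and reuses the publish()/run_until_idle() calls referenced in the serve() docstring above.

# Hypothetical caller for ServerManager; names outside this hunk are assumptions.
import asyncio

from flock.orchestrator.server_manager import ServerManager

async def main(flock, my_message):
    # Background mode: returns the asyncio task handle instead of blocking.
    server_task = await ServerManager.serve(
        flock, dashboard=True, blocking=False, port=8344
    )

    # The event loop stays free for normal orchestration work.
    await flock.publish(my_message)
    await flock.run_until_idle()

    # Cancelling the task triggers _cleanup_server_callback(), which stops
    # the dashboard launcher and clears flock._server_task.
    server_task.cancel()

# Blocking mode, by contrast, is simply:
#   await ServerManager.serve(flock)  # runs until interrupted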