flock-core: flock_core-0.5.10-py3-none-any.whl → flock_core-0.5.20-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (91)
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/api/models.py +283 -0
  11. flock/{service.py → api/service.py} +121 -63
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/graph_builder.py +7 -7
  31. flock/dashboard/routes/__init__.py +21 -0
  32. flock/dashboard/routes/control.py +327 -0
  33. flock/dashboard/routes/helpers.py +340 -0
  34. flock/dashboard/routes/themes.py +76 -0
  35. flock/dashboard/routes/traces.py +521 -0
  36. flock/dashboard/routes/websocket.py +108 -0
  37. flock/dashboard/service.py +44 -1294
  38. flock/engines/dspy/__init__.py +20 -0
  39. flock/engines/dspy/artifact_materializer.py +216 -0
  40. flock/engines/dspy/signature_builder.py +474 -0
  41. flock/engines/dspy/streaming_executor.py +858 -0
  42. flock/engines/dspy_engine.py +45 -1330
  43. flock/engines/examples/simple_batch_engine.py +2 -2
  44. flock/examples.py +7 -7
  45. flock/logging/logging.py +1 -16
  46. flock/models/__init__.py +10 -0
  47. flock/models/system_artifacts.py +33 -0
  48. flock/orchestrator/__init__.py +45 -0
  49. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  50. flock/orchestrator/artifact_manager.py +168 -0
  51. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  52. flock/orchestrator/component_runner.py +389 -0
  53. flock/orchestrator/context_builder.py +167 -0
  54. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  55. flock/orchestrator/event_emitter.py +167 -0
  56. flock/orchestrator/initialization.py +184 -0
  57. flock/orchestrator/lifecycle_manager.py +226 -0
  58. flock/orchestrator/mcp_manager.py +202 -0
  59. flock/orchestrator/scheduler.py +189 -0
  60. flock/orchestrator/server_manager.py +234 -0
  61. flock/orchestrator/tracing.py +147 -0
  62. flock/storage/__init__.py +10 -0
  63. flock/storage/artifact_aggregator.py +158 -0
  64. flock/storage/in_memory/__init__.py +6 -0
  65. flock/storage/in_memory/artifact_filter.py +114 -0
  66. flock/storage/in_memory/history_aggregator.py +115 -0
  67. flock/storage/sqlite/__init__.py +10 -0
  68. flock/storage/sqlite/agent_history_queries.py +154 -0
  69. flock/storage/sqlite/consumption_loader.py +100 -0
  70. flock/storage/sqlite/query_builder.py +112 -0
  71. flock/storage/sqlite/query_params_builder.py +91 -0
  72. flock/storage/sqlite/schema_manager.py +168 -0
  73. flock/storage/sqlite/summary_queries.py +194 -0
  74. flock/utils/__init__.py +14 -0
  75. flock/utils/async_utils.py +67 -0
  76. flock/{runtime.py → utils/runtime.py} +3 -3
  77. flock/utils/time_utils.py +53 -0
  78. flock/utils/type_resolution.py +38 -0
  79. flock/{utilities.py → utils/utilities.py} +2 -2
  80. flock/utils/validation.py +57 -0
  81. flock/utils/visibility.py +79 -0
  82. flock/utils/visibility_utils.py +134 -0
  83. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/METADATA +69 -61
  84. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -31
  85. flock/agent.py +0 -1578
  86. flock/orchestrator.py +0 -1746
  87. /flock/{visibility.py → core/visibility.py} +0 -0
  88. /flock/{helper → utils}/cli_helper.py +0 -0
  89. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
  90. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
  91. {flock_core-0.5.10.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
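
Most of this release is a module reorganization rather than a behavioral change: top-level modules move under flock/core, flock/components, flock/orchestrator, flock/storage, and flock/utils, and the monolithic flock/agent.py and flock/orchestrator.py are removed in favor of the new packages. A hedged sketch of how import paths shift for downstream code, using names the new modules themselves import; whether 0.5.20 also keeps compatibility re-exports at the old paths is not visible from this diff:

# 0.5.10 layout
# from flock.artifacts import Artifact
# from flock.subscription import Subscription
# from flock.runtime import Context

# 0.5.20 layout, per the moves listed above
from flock.core.artifacts import Artifact
from flock.core.subscription import Subscription
from flock.utils.runtime import Context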
flock/agent/component_lifecycle.py (new file)
@@ -0,0 +1,325 @@
+ """Agent component lifecycle management - hook execution and coordination.
+
+ Phase 4: Extracted from agent.py to organize component hook execution logic.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ from collections.abc import Sequence
+ from typing import TYPE_CHECKING
+
+ from flock.logging.logging import get_logger
+
+
+ if TYPE_CHECKING:
+     from flock.components import AgentComponent, EngineComponent
+     from flock.core import Agent
+     from flock.core.artifacts import Artifact
+     from flock.utils.runtime import Context, EvalInputs, EvalResult
+
+ logger = get_logger(__name__)
+
+
+ class ComponentLifecycle:
+     """Manages agent component lifecycle hook execution.
+
+     This module handles all component hook execution during agent lifecycle:
+     - on_initialize
+     - on_pre_consume
+     - on_pre_evaluate
+     - on_post_evaluate
+     - on_post_publish
+     - on_error
+     - on_terminate
+     """
+
+     def __init__(self, agent_name: str):
+         """Initialize ComponentLifecycle for a specific agent.
+
+         Args:
+             agent_name: Name of the agent (for logging)
+         """
+         self._agent_name = agent_name
+         self._logger = logging.getLogger(__name__)
+
+     def _component_display_name(
+         self, component: AgentComponent | EngineComponent
+     ) -> str:
+         """Get display name for component logging."""
+         return getattr(component, "name", None) or component.__class__.__name__
+
+     async def run_initialize(
+         self,
+         agent: Agent,
+         ctx: Context,
+         utilities: list[AgentComponent],
+         engines: list[EngineComponent],
+     ) -> None:
+         """Execute on_initialize hooks for all components.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             utilities: List of utility components
+             engines: List of engine components
+
+         Raises:
+             Exception: If any component initialization fails
+         """
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+             logger.debug(
+                 f"Agent initialize: agent={self._agent_name}, component={comp_name}, priority={priority}"
+             )
+             try:
+                 await component.on_initialize(agent, ctx)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent initialize failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, error={exc!s}"
+                 )
+                 raise
+
+         for engine in engines:
+             await engine.on_initialize(agent, ctx)
+
+     async def run_pre_consume(
+         self,
+         agent: Agent,
+         ctx: Context,
+         inputs: list[Artifact],
+         utilities: list[AgentComponent],
+     ) -> list[Artifact]:
+         """Execute on_pre_consume hooks, allowing components to transform inputs.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             inputs: Input artifacts to be consumed
+             utilities: List of utility components
+
+         Returns:
+             Transformed input artifacts after all components process them
+
+         Raises:
+             Exception: If any component pre_consume hook fails
+         """
+         current = inputs
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+             logger.debug(
+                 f"Agent pre_consume: agent={self._agent_name}, component={comp_name}, "
+                 f"priority={priority}, input_count={len(current)}"
+             )
+             try:
+                 current = await component.on_pre_consume(agent, ctx, current)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent pre_consume failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, error={exc!s}"
+                 )
+                 raise
+         return current
+
+     async def run_pre_evaluate(
+         self,
+         agent: Agent,
+         ctx: Context,
+         inputs: EvalInputs,
+         utilities: list[AgentComponent],
+     ) -> EvalInputs:
+         """Execute on_pre_evaluate hooks, allowing components to transform evaluation inputs.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             inputs: Evaluation inputs with artifacts and state
+             utilities: List of utility components
+
+         Returns:
+             Transformed evaluation inputs after all components process them
+
+         Raises:
+             Exception: If any component pre_evaluate hook fails
+         """
+         current = inputs
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+             logger.debug(
+                 f"Agent pre_evaluate: agent={self._agent_name}, component={comp_name}, "
+                 f"priority={priority}, artifact_count={len(current.artifacts)}"
+             )
+             try:
+                 current = await component.on_pre_evaluate(agent, ctx, current)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent pre_evaluate failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, error={exc!s}"
+                 )
+                 raise
+         return current
+
+     async def run_post_evaluate(
+         self,
+         agent: Agent,
+         ctx: Context,
+         inputs: EvalInputs,
+         result: EvalResult,
+         utilities: list[AgentComponent],
+     ) -> EvalResult:
+         """Execute on_post_evaluate hooks, allowing components to transform results.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             inputs: Original evaluation inputs
+             result: Evaluation result to be transformed
+             utilities: List of utility components
+
+         Returns:
+             Transformed evaluation result after all components process it
+
+         Raises:
+             Exception: If any component post_evaluate hook fails
+         """
+         current = result
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+             logger.debug(
+                 f"Agent post_evaluate: agent={self._agent_name}, component={comp_name}, "
+                 f"priority={priority}, artifact_count={len(current.artifacts)}"
+             )
+             try:
+                 current = await component.on_post_evaluate(agent, ctx, inputs, current)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent post_evaluate failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, error={exc!s}"
+                 )
+                 raise
+         return current
+
+     async def run_post_publish(
+         self,
+         agent: Agent,
+         ctx: Context,
+         artifacts: Sequence[Artifact],
+         utilities: list[AgentComponent],
+     ) -> None:
+         """Execute on_post_publish hooks for each published artifact.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             artifacts: Published artifacts
+             utilities: List of utility components
+
+         Raises:
+             Exception: If any component post_publish hook fails
+         """
+         for artifact in artifacts:
+             for component in utilities:
+                 comp_name = self._component_display_name(component)
+                 priority = getattr(component, "priority", 0)
+                 logger.debug(
+                     f"Agent post_publish: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, artifact_id={artifact.id}"
+                 )
+                 try:
+                     await component.on_post_publish(agent, ctx, artifact)
+                 except Exception as exc:
+                     logger.exception(
+                         f"Agent post_publish failed: agent={self._agent_name}, component={comp_name}, "
+                         f"priority={priority}, artifact_id={artifact.id}, error={exc!s}"
+                     )
+                     raise
+
+     async def run_error(
+         self,
+         agent: Agent,
+         ctx: Context,
+         error: Exception,
+         utilities: list[AgentComponent],
+         engines: list[EngineComponent],
+     ) -> None:
+         """Execute on_error hooks for all components.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             error: Exception that occurred
+             utilities: List of utility components
+             engines: List of engine components
+
+         Raises:
+             Exception: If any component error hook fails
+         """
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+
+             # Python 3.12+ TaskGroup raises BaseExceptionGroup - extract sub-exceptions
+             error_detail = str(error)
+             if isinstance(error, BaseExceptionGroup):
+                 sub_exceptions = [f"{type(e).__name__}: {e}" for e in error.exceptions]
+                 error_detail = f"{error!s} - Sub-exceptions: {sub_exceptions}"
+
+             logger.debug(
+                 f"Agent error hook: agent={self._agent_name}, component={comp_name}, "
+                 f"priority={priority}, error={error_detail}"
+             )
+             try:
+                 await component.on_error(agent, ctx, error)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent error hook failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, original_error={error!s}, hook_error={exc!s}"
+                 )
+                 raise
+
+         for engine in engines:
+             await engine.on_error(agent, ctx, error)
+
+     async def run_terminate(
+         self,
+         agent: Agent,
+         ctx: Context,
+         utilities: list[AgentComponent],
+         engines: list[EngineComponent],
+     ) -> None:
+         """Execute on_terminate hooks for all components.
+
+         Args:
+             agent: Agent instance
+             ctx: Execution context
+             utilities: List of utility components
+             engines: List of engine components
+
+         Raises:
+             Exception: If any component termination fails
+         """
+         for component in utilities:
+             comp_name = self._component_display_name(component)
+             priority = getattr(component, "priority", 0)
+             logger.debug(
+                 f"Agent terminate: agent={self._agent_name}, component={comp_name}, priority={priority}"
+             )
+             try:
+                 await component.on_terminate(agent, ctx)
+             except Exception as exc:
+                 logger.exception(
+                     f"Agent terminate failed: agent={self._agent_name}, component={comp_name}, "
+                     f"priority={priority}, error={exc!s}"
+                 )
+                 raise
+
+         for engine in engines:
+             await engine.on_terminate(agent, ctx)
+
+
+ __all__ = ["ComponentLifecycle"]
flock/agent/context_resolver.py (new file)
@@ -0,0 +1,141 @@
+ """Agent context resolution - future context provider integration.
+
+ Phase 4: Extracted from agent.py to organize context provider logic.
+
+ NOTE: This module provides the foundation for context provider resolution.
+ Currently minimal as context resolution happens at the orchestrator level.
+ This extraction prepares for future refactoring where agents will have
+ more control over their execution context.
+ """
+
+ from __future__ import annotations
+
+ import logging
+ from typing import TYPE_CHECKING, Any
+
+ from flock.logging.logging import get_logger
+
+
+ if TYPE_CHECKING:
+     from flock.core import Agent
+     from flock.core.artifacts import Artifact
+     from flock.core.subscription import Subscription
+     from flock.utils.runtime import Context
+
+ logger = get_logger(__name__)
+
+
+ class ContextResolver:
+     """Manages context provider resolution for agent execution.
+
+     This module handles determining which context provider to use
+     and potentially fetching context artifacts in future phases.
+
+     Currently minimal as context resolution is orchestrator-level.
+     This extraction prepares for Phase 6 refactoring where context
+     providers will be more agent-centric.
+     """
+
+     def __init__(self, agent_name: str):
+         """Initialize ContextResolver for a specific agent.
+
+         Args:
+             agent_name: Name of the agent (for logging)
+         """
+         self._agent_name = agent_name
+         self._logger = logging.getLogger(__name__)
+
+     def get_provider(
+         self, agent: Agent, default_provider: Any | None = None
+     ) -> Any | None:
+         """Determine which context provider to use for this agent.
+
+         Resolution order:
+         1. Agent-specific provider (agent.context_provider)
+         2. Orchestrator default provider (default_provider)
+         3. None (no provider configured)
+
+         Args:
+             agent: Agent instance
+             default_provider: Default provider from orchestrator
+
+         Returns:
+             Context provider to use, or None if not configured
+         """
+         # Check agent-specific provider first (Phase 3 security fix)
+         if agent.context_provider is not None:
+             logger.debug(
+                 f"Agent context resolution: agent={self._agent_name}, "
+                 f"using_agent_provider=True"
+             )
+             return agent.context_provider
+
+         # Fall back to orchestrator default
+         if default_provider is not None:
+             logger.debug(
+                 f"Agent context resolution: agent={self._agent_name}, "
+                 f"using_default_provider=True"
+             )
+             return default_provider
+
+         # No provider configured
+         logger.debug(
+             f"Agent context resolution: agent={self._agent_name}, "
+             f"no_provider_configured=True"
+         )
+         return None
+
+     async def resolve_context(
+         self,
+         agent: Agent,
+         subscription: Subscription,
+         trigger_artifacts: list[Artifact],
+         default_provider: Any | None = None,
+     ) -> Context:
+         """Resolve execution context for agent (future implementation).
+
+         NOTE: Currently returns a basic Context. Future phases will:
+         - Use provider to fetch additional context artifacts
+         - Build AgentContext with trigger + context artifacts
+         - Apply visibility filtering at security boundary
+
+         Args:
+             agent: Agent being executed
+             subscription: Subscription that triggered execution
+             trigger_artifacts: Artifacts that triggered agent
+             default_provider: Default context provider from orchestrator
+
+         Returns:
+             Resolved execution context (currently basic)
+         """
+         # Determine which provider to use
+         provider = self.get_provider(agent, default_provider)
+
+         # Future: Use provider to fetch context artifacts
+         # For now, we just log the resolution
+         if provider is None:
+             self._logger.debug(
+                 f"Agent context: agent={self._agent_name}, "
+                 f"no_context_provider, using_trigger_artifacts_only=True"
+             )
+
+         # Future implementation will:
+         # 1. Build context request from subscription
+         # 2. Call provider.get_artifacts(request)
+         # 3. Return AgentContext with both trigger + context artifacts
+
+         # For now, return a minimal context structure
+         # (actual Context building is orchestrator-level in current architecture)
+         from flock.utils.runtime import Context
+
+         return Context(
+             correlation_id=None,  # Will be filled by orchestrator
+             task_id="",  # Will be filled by orchestrator
+             state={},
+             is_batch=False,
+             artifacts=[],
+             agent_identity=None,
+         )
+
+
+ __all__ = ["ContextResolver"]
flock/agent/mcp_integration.py (new file)
@@ -0,0 +1,212 @@
+ """Agent MCP integration - server configuration and tool loading.
+
+ Phase 4: Extracted from agent.py to eliminate C-rated complexity in with_mcps() and _get_mcp_tools().
+ """
+
+ from __future__ import annotations
+
+ import logging
+ from collections.abc import Callable, Iterable
+ from typing import TYPE_CHECKING, Any
+
+ from flock.logging.logging import get_logger
+
+
+ if TYPE_CHECKING:
+     from flock.agent import MCPServerConfig
+     from flock.core import Flock
+     from flock.utils.runtime import Context
+
+
+ logger = get_logger(__name__)
+
+
+ class MCPIntegration:
+     """Handles MCP server configuration and tool loading for an agent.
+
+     This module encapsulates all MCP-related logic including:
+     - Server configuration parsing (dict, list, mixed formats)
+     - Tool loading and whitelisting
+     - Graceful degradation on failures
+     """
+
+     def __init__(self, agent_name: str, orchestrator: Flock):
+         """Initialize MCPIntegration for a specific agent.
+
+         Args:
+             agent_name: Name of the agent (for error messages and logging)
+             orchestrator: Flock orchestrator instance (for MCP manager access)
+         """
+         self._agent_name = agent_name
+         self._orchestrator = orchestrator
+         self._logger = logging.getLogger(__name__)
+
+         # Agent MCP state
+         self.mcp_server_names: set[str] = set()
+         self.mcp_server_mounts: dict[str, list[str]] = {}
+         self.tool_whitelist: list[str] | None = None
+
+     async def get_mcp_tools(self, ctx: Context) -> list[Callable]:
+         """Lazy-load MCP tools from assigned servers.
+
+         Architecture Decision: AD001 - Two-Level Architecture
+         Agents fetch tools from servers registered at orchestrator level.
+
+         Architecture Decision: AD003 - Tool Namespacing
+         All tools are namespaced as {server}__{tool}.
+
+         Architecture Decision: AD007 - Graceful Degradation
+         If MCP loading fails, returns empty list so agent continues with native tools.
+
+         Args:
+             ctx: Current execution context with agent_id and run_id
+
+         Returns:
+             List of DSPy-compatible tool callables
+         """
+         if not self.mcp_server_names:
+             # No MCP servers assigned to this agent
+             return []
+
+         try:
+             # Get the MCP manager from orchestrator
+             manager = self._orchestrator.get_mcp_manager()
+
+             # Fetch tools from all assigned servers
+             tools_dict = await manager.get_tools_for_agent(
+                 agent_id=self._agent_name,
+                 run_id=ctx.task_id,
+                 server_names=self.mcp_server_names,
+                 server_mounts=self.mcp_server_mounts,  # Pass server-specific mounts
+             )
+
+             # Whitelisting logic
+             tool_whitelist = self.tool_whitelist
+             if (
+                 tool_whitelist is not None
+                 and isinstance(tool_whitelist, list)
+                 and len(tool_whitelist) > 0
+             ):
+                 filtered_tools: dict[str, Any] = {}
+                 for tool_key, tool_entry in tools_dict.items():
+                     if isinstance(tool_entry, dict):
+                         original_name = tool_entry.get("original_name", None)
+                         if (
+                             original_name is not None
+                             and original_name in tool_whitelist
+                         ):
+                             filtered_tools[tool_key] = tool_entry
+
+                 tools_dict = filtered_tools
+
+             # Convert to DSPy tool callables
+             dspy_tools = []
+             for namespaced_name, tool_info in tools_dict.items():
+                 tool_info["server_name"]
+                 flock_tool = tool_info["tool"]  # Already a FlockMCPTool
+                 client = tool_info["client"]
+
+                 # Convert to DSPy tool
+                 dspy_tool = flock_tool.as_dspy_tool(server=client)
+
+                 # Update name to include namespace
+                 dspy_tool.name = namespaced_name
+
+                 dspy_tools.append(dspy_tool)
+
+             return dspy_tools
+
+         except Exception as e:
+             # Architecture Decision: AD007 - Graceful Degradation
+             # Agent continues with native tools only
+             logger.error(
+                 f"Failed to load MCP tools for agent {self._agent_name}: {e}",
+                 exc_info=True,
+             )
+             return []
+
+     def configure_servers(
+         self,
+         servers: (Iterable[str] | dict[str, MCPServerConfig]),
+         registered_servers: set[str],
+     ) -> None:
+         """Configure MCP servers for this agent with optional server-specific mount points.
+
+         Architecture Decision: AD001 - Two-Level Architecture
+         Agents reference servers registered at orchestrator level.
+
+         Args:
+             servers: One of:
+                 - List of server names (strings) - no specific mounts
+                 - Dict mapping server names to MCPServerConfig
+             registered_servers: Set of server names registered with orchestrator (for validation)
+
+         Raises:
+             ValueError: If any server name is not registered with orchestrator
+             TypeError: If server specification format is invalid
+
+         Examples:
+             >>> # Simple: no mount restrictions
+             >>> integration.configure_servers(["filesystem", "github"], registered)
+
+             >>> # Server-specific config with roots and tool whitelist
+             >>> integration.configure_servers(
+             ...     {
+             ...         "filesystem": {
+             ...             "roots": ["/workspace/dir/data"],
+             ...             "tool_whitelist": ["read_file"],
+             ...         },
+             ...         "github": {},  # No restrictions for github
+             ...     },
+             ...     registered,
+             ... )
+         """
+         # Parse input into server_names and mounts
+         server_set: set[str] = set()
+         server_mounts: dict[str, list[str]] = {}
+         whitelist = None
+
+         if isinstance(servers, dict):
+             # Dict format: {"server": {"roots": ["/path1"], "tool_whitelist": ["tool1"]}}
+             for server_name, server_config in servers.items():
+                 server_set.add(server_name)
+
+                 if isinstance(server_config, dict):
+                     # MCPServerConfig dict with optional roots and tool_whitelist
+                     mounts = server_config.get("roots", None)
+                     if (
+                         mounts is not None
+                         and isinstance(mounts, list)
+                         and len(mounts) > 0
+                     ):
+                         server_mounts[server_name] = list(mounts)
+
+                     config_whitelist = server_config.get("tool_whitelist", None)
+                     if (
+                         config_whitelist is not None
+                         and isinstance(config_whitelist, list)
+                         and len(config_whitelist) > 0
+                     ):
+                         whitelist = config_whitelist
+         else:
+             # Assume it's an iterable of strings
+             server_set = set(servers)
+
+         # Validate all servers exist in orchestrator
+         invalid_servers = server_set - registered_servers
+
+         if invalid_servers:
+             available = list(registered_servers) if registered_servers else ["none"]
+             raise ValueError(
+                 f"MCP servers not registered: {invalid_servers}. "
+                 f"Available servers: {available}. "
+                 f"Register servers using orchestrator.add_mcp() first."
+             )
+
+         # Store in integration
+         self.mcp_server_names = server_set
+         self.mcp_server_mounts = server_mounts
+         self.tool_whitelist = whitelist
+
+
+ __all__ = ["MCPIntegration"]