flock-core 0.5.11__py3-none-any.whl → 0.5.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic; see the advisory details below.

Files changed (94):
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/{api_models.py → api/models.py} +0 -2
  11. flock/{service.py → api/service.py} +3 -3
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/events.py +1 -1
  31. flock/dashboard/graph_builder.py +7 -7
  32. flock/dashboard/routes/__init__.py +21 -0
  33. flock/dashboard/routes/control.py +327 -0
  34. flock/dashboard/routes/helpers.py +340 -0
  35. flock/dashboard/routes/themes.py +76 -0
  36. flock/dashboard/routes/traces.py +521 -0
  37. flock/dashboard/routes/websocket.py +108 -0
  38. flock/dashboard/service.py +43 -1316
  39. flock/engines/dspy/__init__.py +20 -0
  40. flock/engines/dspy/artifact_materializer.py +216 -0
  41. flock/engines/dspy/signature_builder.py +474 -0
  42. flock/engines/dspy/streaming_executor.py +812 -0
  43. flock/engines/dspy_engine.py +45 -1330
  44. flock/engines/examples/simple_batch_engine.py +2 -2
  45. flock/engines/streaming/__init__.py +3 -0
  46. flock/engines/streaming/sinks.py +489 -0
  47. flock/examples.py +7 -7
  48. flock/logging/logging.py +1 -16
  49. flock/models/__init__.py +10 -0
  50. flock/orchestrator/__init__.py +45 -0
  51. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  52. flock/orchestrator/artifact_manager.py +168 -0
  53. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  54. flock/orchestrator/component_runner.py +389 -0
  55. flock/orchestrator/context_builder.py +167 -0
  56. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  57. flock/orchestrator/event_emitter.py +167 -0
  58. flock/orchestrator/initialization.py +184 -0
  59. flock/orchestrator/lifecycle_manager.py +226 -0
  60. flock/orchestrator/mcp_manager.py +202 -0
  61. flock/orchestrator/scheduler.py +189 -0
  62. flock/orchestrator/server_manager.py +234 -0
  63. flock/orchestrator/tracing.py +147 -0
  64. flock/storage/__init__.py +10 -0
  65. flock/storage/artifact_aggregator.py +158 -0
  66. flock/storage/in_memory/__init__.py +6 -0
  67. flock/storage/in_memory/artifact_filter.py +114 -0
  68. flock/storage/in_memory/history_aggregator.py +115 -0
  69. flock/storage/sqlite/__init__.py +10 -0
  70. flock/storage/sqlite/agent_history_queries.py +154 -0
  71. flock/storage/sqlite/consumption_loader.py +100 -0
  72. flock/storage/sqlite/query_builder.py +112 -0
  73. flock/storage/sqlite/query_params_builder.py +91 -0
  74. flock/storage/sqlite/schema_manager.py +168 -0
  75. flock/storage/sqlite/summary_queries.py +194 -0
  76. flock/utils/__init__.py +14 -0
  77. flock/utils/async_utils.py +67 -0
  78. flock/{runtime.py → utils/runtime.py} +3 -3
  79. flock/utils/time_utils.py +53 -0
  80. flock/utils/type_resolution.py +38 -0
  81. flock/{utilities.py → utils/utilities.py} +2 -2
  82. flock/utils/validation.py +57 -0
  83. flock/utils/visibility.py +79 -0
  84. flock/utils/visibility_utils.py +134 -0
  85. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/METADATA +19 -5
  86. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/RECORD +92 -34
  87. flock/agent.py +0 -1578
  88. flock/orchestrator.py +0 -1983
  89. /flock/{visibility.py → core/visibility.py} +0 -0
  90. /flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
  91. /flock/{helper → utils}/cli_helper.py +0 -0
  92. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/WHEEL +0 -0
  93. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/entry_points.txt +0 -0
  94. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,226 @@
1
+ """Lifecycle management for background tasks and cleanup.
2
+
3
+ Phase 5A: Extracted from orchestrator.py to isolate background task coordination.
4
+
5
+ This module handles background tasks for batch timeouts and correlation cleanup,
6
+ reducing orchestrator complexity and centralizing async task management.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import asyncio
12
+ import logging
13
+ from asyncio import Task
14
+ from typing import TYPE_CHECKING, Any
15
+
16
+
17
+ if TYPE_CHECKING:
18
+ from flock.orchestrator.batch_accumulator import BatchEngine
19
+ from flock.orchestrator.correlation_engine import CorrelationEngine
20
+
21
+
22
class LifecycleManager:
    """Manages background tasks for batch and correlation lifecycle.

    This class centralizes all background task management for:
    - Correlation group cleanup (time-based expiry)
    - Batch timeout checking (timeout-based flushing)

    Phase 5A: Extracted to reduce orchestrator complexity and improve testability.
    """

    def __init__(
        self,
        *,
        correlation_engine: CorrelationEngine,
        batch_engine: BatchEngine,
        cleanup_interval: float = 0.1,
    ):
        """Initialize LifecycleManager with engines and intervals.

        Args:
            correlation_engine: Engine managing correlation groups
            batch_engine: Engine managing batch accumulation
            cleanup_interval: How often to check for expiry (seconds, default: 0.1)
        """
        self._correlation_engine = correlation_engine
        self._batch_engine = batch_engine
        self._cleanup_interval = cleanup_interval

        # Background tasks (created lazily; None while not running)
        self._correlation_cleanup_task: Task[Any] | None = None
        self._batch_timeout_task: Task[Any] | None = None

        # Callback for batch timeout flushing (set by orchestrator)
        self._batch_timeout_callback: Any | None = None

        self._logger = logging.getLogger(__name__)

    async def start_correlation_cleanup(self) -> None:
        """Start background correlation cleanup loop if not already running.

        This ensures expired correlation groups are periodically discarded.
        Called when there are pending correlations during run_until_idle.
        """
        if (
            self._correlation_cleanup_task is None
            or self._correlation_cleanup_task.done()
        ):
            self._correlation_cleanup_task = asyncio.create_task(
                self._correlation_cleanup_loop()
            )

    def set_batch_timeout_callback(self, callback: Any) -> None:
        """Set the callback to invoke when batches timeout.

        Args:
            callback: Async function to call when timeout checking. Should handle
                flushing expired batches and scheduling agent tasks.
        """
        self._batch_timeout_callback = callback

    async def start_batch_timeout_checker(self) -> None:
        """Start background batch timeout checker loop if not already running.

        This ensures timeout-expired batches are periodically flushed.
        Called when there are pending batches during run_until_idle.
        """
        if self._batch_timeout_task is None or self._batch_timeout_task.done():
            self._batch_timeout_task = asyncio.create_task(
                self._batch_timeout_checker_loop()
            )

    async def shutdown(self) -> None:
        """Cancel and cleanup all background tasks.

        Called during orchestrator shutdown to ensure clean resource cleanup.
        """
        # DRY fix: the cancel-and-await sequence was duplicated verbatim for
        # both tasks; it now lives in _cancel_task. Attributes are reset to
        # None so the manager is observably idle after shutdown.
        await self._cancel_task(self._correlation_cleanup_task)
        self._correlation_cleanup_task = None
        await self._cancel_task(self._batch_timeout_task)
        self._batch_timeout_task = None

    @staticmethod
    async def _cancel_task(task: Task[Any] | None) -> None:
        """Cancel *task* (if still running) and wait for it to finish.

        The CancelledError raised by awaiting a cancelled task is expected
        and swallowed; any other exception propagates to the caller.
        """
        if task and not task.done():
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                pass  # Clean shutdown

    # Background Loops ─────────────────────────────────────────────────────

    async def _correlation_cleanup_loop(self) -> None:
        """Background task that periodically cleans up expired correlation groups.

        Runs continuously until all correlation groups are cleared or orchestrator shuts down.
        Checks every `cleanup_interval` seconds for time-based expired correlations and discards them.
        """
        try:
            while True:
                await asyncio.sleep(self._cleanup_interval)
                self._cleanup_expired_correlations()

                # Stop if no correlation groups remain
                if not self._correlation_engine.correlation_groups:
                    self._correlation_cleanup_task = None
                    break
        except asyncio.CancelledError:
            # Clean shutdown
            self._correlation_cleanup_task = None
            raise

    def _cleanup_expired_correlations(self) -> None:
        """Clean up all expired correlation groups across all subscriptions.

        Called periodically by background task to enforce time-based correlation windows.
        Discards incomplete correlations that have exceeded their time window.
        """
        # Snapshot keys via list() because cleanup_expired may mutate the dict.
        for agent_name, subscription_index in list(
            self._correlation_engine.correlation_groups.keys()
        ):
            self._correlation_engine.cleanup_expired(agent_name, subscription_index)

    async def _batch_timeout_checker_loop(self) -> None:
        """Background task that periodically checks for batch timeouts.

        Runs continuously until all batches are cleared or orchestrator shuts down.
        Checks every `cleanup_interval` seconds for expired batches and flushes them via callback.
        """
        try:
            while True:
                await asyncio.sleep(self._cleanup_interval)

                # Call the timeout callback to check and flush expired batches
                if self._batch_timeout_callback:
                    await self._batch_timeout_callback()

                # Stop if no batches remain
                if not self._batch_engine.batches:
                    self._batch_timeout_task = None
                    break
        except asyncio.CancelledError:
            # Clean shutdown
            self._batch_timeout_task = None
            raise

    # Helper Methods ───────────────────────────────────────────────────────

    async def check_batch_timeouts(self, orchestrator_callback: Any) -> None:
        """Check all batches for timeout expiry and invoke callback for expired batches.

        This method is called periodically by the background timeout checker
        or manually (in tests) to enforce timeout-based batching.

        Args:
            orchestrator_callback: Async function to call for each expired batch.
                Signature: async def callback(agent_name: str, subscription_index: int,
                artifacts: list[Artifact]) -> None
        """
        expired_batches = self._batch_engine.check_timeouts()

        for agent_name, subscription_index in expired_batches:
            # Flush the expired batch
            artifacts = self._batch_engine.flush_batch(agent_name, subscription_index)

            if artifacts is not None:
                # Invoke orchestrator callback to schedule task
                await orchestrator_callback(agent_name, subscription_index, artifacts)

    async def flush_all_batches(self, orchestrator_callback: Any) -> None:
        """Flush all partial batches (for shutdown - ensures zero data loss).

        Args:
            orchestrator_callback: Async function to call for each flushed batch.
                Signature: async def callback(agent_name: str, subscription_index: int,
                artifacts: list[Artifact]) -> None
        """
        all_batches = self._batch_engine.flush_all()

        for agent_name, subscription_index, artifacts in all_batches:
            # Invoke orchestrator callback to schedule task
            await orchestrator_callback(agent_name, subscription_index, artifacts)

    # Properties ───────────────────────────────────────────────────────────

    @property
    def has_pending_correlations(self) -> bool:
        """Check if there are any pending correlation groups."""
        return any(
            groups and any(group.waiting_artifacts for group in groups.values())
            for groups in self._correlation_engine.correlation_groups.values()
        )

    @property
    def has_pending_batches(self) -> bool:
        """Check if there are any pending batches."""
        return any(
            accumulator.artifacts for accumulator in self._batch_engine.batches.values()
        )


__all__ = ["LifecycleManager"]
@@ -0,0 +1,202 @@
1
+ """MCP (Model Context Protocol) server management for orchestrator.
2
+
3
+ This module handles MCP server registration and client manager lifecycle.
4
+ Implements lazy connection establishment pattern (AD005).
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ from typing import TYPE_CHECKING
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from flock.mcp import (
14
+ FlockMCPClientManager,
15
+ FlockMCPConfiguration,
16
+ ServerParameters,
17
+ )
18
+
19
+
20
class MCPManager:
    """Manages MCP server registration and client connections.

    Architecture Decision: AD001 - Two-Level Architecture
    MCP servers are registered at orchestrator level and assigned to agents.

    Architecture Decision: AD005 - Lazy Connection Establishment
    Connections are established only when get_mcp_manager() is first called.

    Attributes:
        _configs: Dict mapping server names to their configurations
        _client_manager: Lazy-initialized MCP client manager instance
    """

    def __init__(self) -> None:
        """Initialize the MCP manager with empty configuration."""
        self._configs: dict[str, FlockMCPConfiguration] = {}
        self._client_manager: FlockMCPClientManager | None = None

    def add_mcp(
        self,
        name: str,
        connection_params: ServerParameters,
        *,
        enable_tools_feature: bool = True,
        enable_prompts_feature: bool = True,
        enable_sampling_feature: bool = True,
        enable_roots_feature: bool = True,
        mount_points: list[str] | None = None,
        tool_whitelist: list[str] | None = None,
        read_timeout_seconds: float = 300,
        max_retries: int = 3,
        **kwargs,
    ) -> None:
        """Register an MCP server configuration.

        Args:
            name: Unique identifier for this MCP server
            connection_params: Server connection parameters
            enable_tools_feature: Enable tool execution
            enable_prompts_feature: Enable prompt templates
            enable_sampling_feature: Enable LLM sampling requests
            enable_roots_feature: Enable filesystem roots
            mount_points: Optional list of filesystem mount points
            tool_whitelist: Optional list of tool names to allow
            read_timeout_seconds: Timeout for server communications
            max_retries: Connection retry attempts
            **kwargs: Accepted for forward compatibility; currently ignored.

        Raises:
            ValueError: If server name already registered
        """
        if name in self._configs:
            raise ValueError(f"MCP server '{name}' is already registered.")

        # Local import keeps flock.mcp optional until an MCP server is used.
        from flock.mcp import (
            FlockMCPConfiguration,
            FlockMCPConnectionConfiguration,
            FlockMCPFeatureConfiguration,
        )

        connection_config = FlockMCPConnectionConfiguration(
            max_retries=max_retries,
            connection_parameters=connection_params,
            transport_type=self._detect_transport_type(connection_params),
            read_timeout_seconds=read_timeout_seconds,
            mount_points=self._build_roots(mount_points),
        )

        feature_config = FlockMCPFeatureConfiguration(
            tools_enabled=enable_tools_feature,
            prompts_enabled=enable_prompts_feature,
            sampling_enabled=enable_sampling_feature,
            roots_enabled=enable_roots_feature,
            tool_whitelist=tool_whitelist,
        )

        self._configs[name] = FlockMCPConfiguration(
            name=name,
            connection_config=connection_config,
            feature_config=feature_config,
        )

    @staticmethod
    def _detect_transport_type(connection_params: ServerParameters) -> str:
        """Map concrete parameter types to their transport identifier string.

        Unknown parameter types fall back to "custom".
        """
        from flock.mcp.types import (
            SseServerParameters,
            StdioServerParameters,
            StreamableHttpServerParameters,
            WebsocketServerParameters,
        )

        if isinstance(connection_params, StdioServerParameters):
            return "stdio"
        if isinstance(connection_params, WebsocketServerParameters):
            return "websockets"
        if isinstance(connection_params, SseServerParameters):
            return "sse"
        if isinstance(connection_params, StreamableHttpServerParameters):
            return "streamable_http"
        return "custom"

    @staticmethod
    def _build_roots(mount_points: list[str] | None) -> list | None:
        """Convert mount-point path strings to MCPRoot objects (or None).

        Paths already in URI form (file:// or test://) are kept as-is;
        plain paths are resolved to absolute file:// URIs. The test://
        prefix is used by testing servers such as the mcp-everything server.
        """
        if not mount_points:
            return None

        from pathlib import Path as PathLib

        from flock.mcp.types import MCPRoot

        mcp_roots = []
        for path in mount_points:
            if path.startswith("file://"):
                # Already a file URI; strip scheme to derive the name
                uri = path
                path_str = path.replace("file://", "")
            elif path.startswith("test://"):
                # Already a test URI; strip scheme to derive the name
                uri = path
                path_str = path.replace("test://", "")
            else:
                # Convert to absolute path and create URI
                abs_path = PathLib(path).resolve()
                uri = f"file://{abs_path}"
                path_str = str(abs_path)

            # Extract a meaningful name (last component of path)
            name_component = (
                PathLib(path_str).name
                or path_str.rstrip("/").split("/")[-1]
                or "root"
            )
            mcp_roots.append(MCPRoot(uri=uri, name=name_component))
        return mcp_roots

    def get_mcp_manager(self) -> FlockMCPClientManager:
        """Get or create the MCP client manager.

        Architecture Decision: AD005 - Lazy Connection Establishment
        Connections are established only when this method is first called.

        Returns:
            FlockMCPClientManager instance

        Raises:
            RuntimeError: If no MCP servers registered
        """
        if not self._configs:
            raise RuntimeError("No MCP servers registered. Call add_mcp() first.")

        if self._client_manager is None:
            from flock.mcp import FlockMCPClientManager

            self._client_manager = FlockMCPClientManager(self._configs)

        return self._client_manager

    async def cleanup(self) -> None:
        """Clean up MCP connections.

        Called during orchestrator shutdown to properly close all MCP connections.
        """
        if self._client_manager is not None:
            await self._client_manager.cleanup_all()
            self._client_manager = None

    @property
    def configs(self) -> dict[str, FlockMCPConfiguration]:
        """Get the dictionary of MCP configurations."""
        return self._configs

    @property
    def has_configs(self) -> bool:
        """Check if any MCP servers are registered."""
        return bool(self._configs)


__all__ = ["MCPManager"]
@@ -0,0 +1,189 @@
1
+ """Agent scheduling engine."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import asyncio
6
+ from asyncio import Task
7
+ from typing import TYPE_CHECKING, Any
8
+
9
+ from flock.components.orchestrator import ScheduleDecision
10
+
11
+
12
+ if TYPE_CHECKING:
13
+ from flock.agent import Agent
14
+ from flock.core import Flock
15
+ from flock.core.artifacts import Artifact
16
+ from flock.core.visibility import AgentIdentity
17
+ from flock.orchestrator import ComponentRunner
18
+
19
+
20
class AgentScheduler:
    """Schedules agents for execution based on artifact subscriptions.

    Responsibilities:
    - Match artifacts to agent subscriptions
    - Run scheduling hooks via ComponentRunner
    - Create agent execution tasks
    - Manage task lifecycle
    - Track processed artifacts for deduplication
    """

    def __init__(self, orchestrator: Flock, component_runner: ComponentRunner):
        """Initialize scheduler.

        Args:
            orchestrator: Flock orchestrator instance
            component_runner: Runner for executing component hooks
        """
        self._orchestrator = orchestrator
        self._component_runner = component_runner
        self._tasks: set[Task[Any]] = set()
        # (artifact_id, agent_name) pairs already handed to an agent
        self._processed: set[tuple[str, str]] = set()
        self._logger = orchestrator._logger

    async def schedule_artifact(self, artifact: Artifact) -> None:
        """Schedule agents for an artifact using component hooks.

        Args:
            artifact: Published artifact to match against subscriptions
        """
        # Initialize components on first artifact
        if not self._component_runner.is_initialized:
            await self._component_runner.run_initialize(self._orchestrator)

        # Component hook - artifact published (can transform or block)
        artifact = await self._component_runner.run_artifact_published(
            self._orchestrator, artifact
        )
        if artifact is None:
            return  # Artifact blocked by component

        # Decomposed: per-(agent, subscription) pipeline lives in _maybe_schedule
        for agent in self._orchestrator.agents:
            identity = agent.identity
            for subscription in agent.subscriptions:
                await self._maybe_schedule(agent, identity, subscription, artifact)

    async def _maybe_schedule(
        self, agent: Agent, identity: AgentIdentity, subscription: Any, artifact: Artifact
    ) -> None:
        """Run the full scheduling pipeline for one (agent, subscription) pair.

        Applies the guard checks (event acceptance, self-trigger prevention,
        visibility, subscription match), then the component hooks
        (before-schedule, collect, before-agent-schedule), and finally
        creates the agent task and fires the agent-scheduled notification.
        """
        if not subscription.accepts_events():
            return

        # Skip if the agent produced this artifact and self-trigger is disabled
        if agent.prevent_self_trigger and artifact.produced_by == agent.name:
            return

        # Visibility check
        if not self._check_visibility(artifact, identity):
            return

        # Subscription match check
        if not subscription.matches(artifact):
            return

        # Component hook - before schedule (circuit breaker, deduplication).
        # SKIP and DEFER both abort scheduling here, so handle them together.
        decision = await self._component_runner.run_before_schedule(
            self._orchestrator, artifact, agent, subscription
        )
        if decision in (ScheduleDecision.SKIP, ScheduleDecision.DEFER):
            return

        # Component hook - collect artifacts (AND gates, correlation, batching)
        collection = await self._component_runner.run_collect_artifacts(
            self._orchestrator, artifact, agent, subscription
        )
        if not collection.complete:
            return  # Still collecting

        # Component hook - before agent schedule (final validation)
        artifacts = await self._component_runner.run_before_agent_schedule(
            self._orchestrator, agent, collection.artifacts
        )
        if artifacts is None:
            return  # Scheduling blocked

        # Schedule agent task
        task = self.schedule_task(
            agent, artifacts, is_batch=subscription.batch is not None
        )

        # Component hook - agent scheduled (notification)
        await self._component_runner.run_agent_scheduled(
            self._orchestrator, agent, artifacts, task
        )

    def schedule_task(
        self, agent: Agent, artifacts: list[Artifact], is_batch: bool = False
    ) -> Task[Any]:
        """Schedule agent task and return the task handle.

        Args:
            agent: Agent to execute
            artifacts: Input artifacts
            is_batch: Whether this is batch execution

        Returns:
            Asyncio task handle
        """
        task = asyncio.create_task(
            self._orchestrator._run_agent_task(agent, artifacts, is_batch=is_batch)
        )
        self._tasks.add(task)
        # Self-cleaning set: task removes itself on completion
        task.add_done_callback(self._tasks.discard)
        return task

    def record_agent_run(self, agent: Agent) -> None:
        """Record agent run metric.

        Args:
            agent: Agent that ran
        """
        self._orchestrator.metrics["agent_runs"] += 1

    def mark_processed(self, artifact: Artifact, agent: Agent) -> None:
        """Mark artifact as processed by agent.

        Args:
            artifact: Processed artifact
            agent: Agent that processed it
        """
        self._processed.add((str(artifact.id), agent.name))

    def seen_before(self, artifact: Artifact, agent: Agent) -> bool:
        """Check if artifact was already processed by agent.

        Args:
            artifact: Artifact to check
            agent: Agent to check

        Returns:
            True if already processed
        """
        return (str(artifact.id), agent.name) in self._processed

    def _check_visibility(self, artifact: Artifact, identity: AgentIdentity) -> bool:
        """Check if artifact is visible to agent.

        Args:
            artifact: Artifact to check
            identity: Agent identity

        Returns:
            True if visible
        """
        try:
            return artifact.visibility.allows(identity)
        except AttributeError:  # pragma: no cover - fallback
            # Artifacts without a visibility policy are treated as public
            return True

    @property
    def pending_tasks(self) -> set[Task[Any]]:
        """Get set of pending agent tasks.

        Returns:
            Set of asyncio tasks
        """
        return self._tasks


__all__ = ["AgentScheduler"]