flock-core 0.5.11__py3-none-any.whl → 0.5.21__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (94)
  1. flock/__init__.py +1 -1
  2. flock/agent/__init__.py +30 -0
  3. flock/agent/builder_helpers.py +192 -0
  4. flock/agent/builder_validator.py +169 -0
  5. flock/agent/component_lifecycle.py +325 -0
  6. flock/agent/context_resolver.py +141 -0
  7. flock/agent/mcp_integration.py +212 -0
  8. flock/agent/output_processor.py +304 -0
  9. flock/api/__init__.py +20 -0
  10. flock/{api_models.py → api/models.py} +0 -2
  11. flock/{service.py → api/service.py} +3 -3
  12. flock/cli.py +2 -2
  13. flock/components/__init__.py +41 -0
  14. flock/components/agent/__init__.py +22 -0
  15. flock/{components.py → components/agent/base.py} +4 -3
  16. flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
  17. flock/components/orchestrator/__init__.py +22 -0
  18. flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
  19. flock/components/orchestrator/circuit_breaker.py +95 -0
  20. flock/components/orchestrator/collection.py +143 -0
  21. flock/components/orchestrator/deduplication.py +78 -0
  22. flock/core/__init__.py +30 -0
  23. flock/core/agent.py +953 -0
  24. flock/{artifacts.py → core/artifacts.py} +1 -1
  25. flock/{context_provider.py → core/context_provider.py} +3 -3
  26. flock/core/orchestrator.py +1102 -0
  27. flock/{store.py → core/store.py} +99 -454
  28. flock/{subscription.py → core/subscription.py} +1 -1
  29. flock/dashboard/collector.py +5 -5
  30. flock/dashboard/events.py +1 -1
  31. flock/dashboard/graph_builder.py +7 -7
  32. flock/dashboard/routes/__init__.py +21 -0
  33. flock/dashboard/routes/control.py +327 -0
  34. flock/dashboard/routes/helpers.py +340 -0
  35. flock/dashboard/routes/themes.py +76 -0
  36. flock/dashboard/routes/traces.py +521 -0
  37. flock/dashboard/routes/websocket.py +108 -0
  38. flock/dashboard/service.py +43 -1316
  39. flock/engines/dspy/__init__.py +20 -0
  40. flock/engines/dspy/artifact_materializer.py +216 -0
  41. flock/engines/dspy/signature_builder.py +474 -0
  42. flock/engines/dspy/streaming_executor.py +812 -0
  43. flock/engines/dspy_engine.py +45 -1330
  44. flock/engines/examples/simple_batch_engine.py +2 -2
  45. flock/engines/streaming/__init__.py +3 -0
  46. flock/engines/streaming/sinks.py +489 -0
  47. flock/examples.py +7 -7
  48. flock/logging/logging.py +1 -16
  49. flock/models/__init__.py +10 -0
  50. flock/orchestrator/__init__.py +45 -0
  51. flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
  52. flock/orchestrator/artifact_manager.py +168 -0
  53. flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
  54. flock/orchestrator/component_runner.py +389 -0
  55. flock/orchestrator/context_builder.py +167 -0
  56. flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
  57. flock/orchestrator/event_emitter.py +167 -0
  58. flock/orchestrator/initialization.py +184 -0
  59. flock/orchestrator/lifecycle_manager.py +226 -0
  60. flock/orchestrator/mcp_manager.py +202 -0
  61. flock/orchestrator/scheduler.py +189 -0
  62. flock/orchestrator/server_manager.py +234 -0
  63. flock/orchestrator/tracing.py +147 -0
  64. flock/storage/__init__.py +10 -0
  65. flock/storage/artifact_aggregator.py +158 -0
  66. flock/storage/in_memory/__init__.py +6 -0
  67. flock/storage/in_memory/artifact_filter.py +114 -0
  68. flock/storage/in_memory/history_aggregator.py +115 -0
  69. flock/storage/sqlite/__init__.py +10 -0
  70. flock/storage/sqlite/agent_history_queries.py +154 -0
  71. flock/storage/sqlite/consumption_loader.py +100 -0
  72. flock/storage/sqlite/query_builder.py +112 -0
  73. flock/storage/sqlite/query_params_builder.py +91 -0
  74. flock/storage/sqlite/schema_manager.py +168 -0
  75. flock/storage/sqlite/summary_queries.py +194 -0
  76. flock/utils/__init__.py +14 -0
  77. flock/utils/async_utils.py +67 -0
  78. flock/{runtime.py → utils/runtime.py} +3 -3
  79. flock/utils/time_utils.py +53 -0
  80. flock/utils/type_resolution.py +38 -0
  81. flock/{utilities.py → utils/utilities.py} +2 -2
  82. flock/utils/validation.py +57 -0
  83. flock/utils/visibility.py +79 -0
  84. flock/utils/visibility_utils.py +134 -0
  85. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/METADATA +19 -5
  86. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/RECORD +92 -34
  87. flock/agent.py +0 -1578
  88. flock/orchestrator.py +0 -1983
  89. /flock/{visibility.py → core/visibility.py} +0 -0
  90. /flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
  91. /flock/{helper → utils}/cli_helper.py +0 -0
  92. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/WHEEL +0 -0
  93. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/entry_points.txt +0 -0
  94. {flock_core-0.5.11.dist-info → flock_core-0.5.21.dist-info}/licenses/LICENSE +0 -0
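The renames above imply new import paths in 0.5.21. A minimal before/after sketch, read off the `flock/{old → new}` entries; whether the top-level `flock` package still re-exports these names is not visible in this diff, so the fully qualified paths are the safe form (the 0.5.11 lines are inferred from the rename arrows, not shown in this diff):

    # 0.5.11 (old flat layout) -- these paths disappear with this release:
    # from flock.artifacts import Artifact
    # from flock.store import FilterConfig
    # from flock.service import BlackboardHTTPService

    # 0.5.21 (new package layout):
    from flock.api.service import BlackboardHTTPService
    from flock.core.artifacts import Artifact
    from flock.core.store import FilterConfig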
flock/orchestrator/server_manager.py
@@ -0,0 +1,234 @@
+ """HTTP server management for orchestrator.
+
+ Handles service startup with optional dashboard integration.
+ Extracted from orchestrator.py to reduce complexity.
+ """
+
+ from __future__ import annotations
+
+ import asyncio
+ from asyncio import Task
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+
+ if TYPE_CHECKING:
+     from flock.core.orchestrator import Flock
+
+
+ class ServerManager:
+     """Manages HTTP service startup for the orchestrator.
+
+     Handles both standard API mode and dashboard mode with WebSocket support.
+     """
+
+     @staticmethod
+     async def serve(
+         orchestrator: Flock,
+         *,
+         dashboard: bool = False,
+         dashboard_v2: bool = False,
+         host: str = "127.0.0.1",
+         port: int = 8344,
+         blocking: bool = True,
+     ) -> Task[None] | None:
+         """Start HTTP service for the orchestrator.
+
+         Args:
+             orchestrator: The Flock orchestrator instance to serve
+             dashboard: Enable real-time dashboard with WebSocket support (default: False)
+             dashboard_v2: Launch the new dashboard v2 frontend (implies dashboard=True)
+             host: Host to bind to (default: "127.0.0.1")
+             port: Port to bind to (default: 8344)
+             blocking: If True, blocks until server stops. If False, starts server
+                 in background and returns task handle (default: True)
+
+         Returns:
+             None if blocking=True, or Task handle if blocking=False
+
+         Examples:
+             # Basic HTTP API (no dashboard) - runs until interrupted
+             await ServerManager.serve(orchestrator)
+
+             # With dashboard (WebSocket + browser launch) - runs until interrupted
+             await ServerManager.serve(orchestrator, dashboard=True)
+
+             # Non-blocking mode - start server in background
+             task = await ServerManager.serve(orchestrator, dashboard=True, blocking=False)
+             # Now you can publish messages and run other logic
+             await orchestrator.publish(my_message)
+             await orchestrator.run_until_idle()
+         """
+         # If non-blocking, start server in background task
+         if not blocking:
+             server_task = asyncio.create_task(
+                 ServerManager._serve_impl(
+                     orchestrator,
+                     dashboard=dashboard,
+                     dashboard_v2=dashboard_v2,
+                     host=host,
+                     port=port,
+                 )
+             )
+             # Add cleanup callback
+             server_task.add_done_callback(
+                 lambda task: ServerManager._cleanup_server_callback(orchestrator, task)
+             )
+             # Store task reference for later cancellation
+             orchestrator._server_task = server_task
+             # Give server a moment to start
+             await asyncio.sleep(0.1)
+             return server_task
+
+         # Blocking mode - run server directly with cleanup
+         try:
+             await ServerManager._serve_impl(
+                 orchestrator,
+                 dashboard=dashboard,
+                 dashboard_v2=dashboard_v2,
+                 host=host,
+                 port=port,
+             )
+         finally:
+             # In blocking mode, manually cleanup dashboard launcher
+             if (
+                 hasattr(orchestrator, "_dashboard_launcher")
+                 and orchestrator._dashboard_launcher is not None
+             ):
+                 orchestrator._dashboard_launcher.stop()
+                 orchestrator._dashboard_launcher = None
+         return None
+
+     @staticmethod
+     def _cleanup_server_callback(orchestrator: Flock, task: Task[None]) -> None:
+         """Cleanup callback when background server task completes."""
+         # Stop dashboard launcher if it was started
+         if (
+             hasattr(orchestrator, "_dashboard_launcher")
+             and orchestrator._dashboard_launcher is not None
+         ):
+             try:
+                 orchestrator._dashboard_launcher.stop()
+             except Exception as e:
+                 orchestrator._logger.warning(f"Failed to stop dashboard launcher: {e}")
+             finally:
+                 orchestrator._dashboard_launcher = None
+
+         # Clear server task reference
+         if hasattr(orchestrator, "_server_task"):
+             orchestrator._server_task = None
+
+         # Log any exceptions from the task
+         try:
+             exc = task.exception()
+             if exc and not isinstance(exc, asyncio.CancelledError):
+                 orchestrator._logger.error(f"Server task failed: {exc}", exc_info=exc)
+         except asyncio.CancelledError:
+             pass  # Normal cancellation
+
+     @staticmethod
+     async def _serve_impl(
+         orchestrator: Flock,
+         *,
+         dashboard: bool = False,
+         dashboard_v2: bool = False,
+         host: str = "127.0.0.1",
+         port: int = 8344,
+     ) -> None:
+         """Internal implementation of serve() - actual server logic."""
+         if dashboard_v2:
+             dashboard = True
+
+         if not dashboard:
+             # Standard service without dashboard
+             await ServerManager._serve_standard(orchestrator, host=host, port=port)
+             return
+
+         # Dashboard mode with WebSocket and event collection
+         await ServerManager._serve_dashboard(
+             orchestrator, dashboard_v2=dashboard_v2, host=host, port=port
+         )
+
+     @staticmethod
+     async def _serve_standard(orchestrator: Flock, *, host: str, port: int) -> None:
+         """Serve standard HTTP API without dashboard.
+
+         Args:
+             orchestrator: The Flock orchestrator instance
+             host: Host to bind to
+             port: Port to bind to
+         """
+         from flock.api.service import BlackboardHTTPService
+
+         service = BlackboardHTTPService(orchestrator)
+         await service.run_async(host=host, port=port)
+
+     @staticmethod
+     async def _serve_dashboard(
+         orchestrator: Flock, *, dashboard_v2: bool, host: str, port: int
+     ) -> None:
+         """Serve HTTP API with dashboard and WebSocket support.
+
+         Args:
+             orchestrator: The Flock orchestrator instance
+             dashboard_v2: Whether to use v2 dashboard frontend
+             host: Host to bind to
+             port: Port to bind to
+         """
+         from flock.core import Agent
+         from flock.dashboard.collector import DashboardEventCollector
+         from flock.dashboard.launcher import DashboardLauncher
+         from flock.dashboard.service import DashboardHTTPService
+         from flock.dashboard.websocket import WebSocketManager
+
+         # Create dashboard components
+         websocket_manager = WebSocketManager()
+         event_collector = DashboardEventCollector(store=orchestrator.store)
+         event_collector.set_websocket_manager(websocket_manager)
+         await event_collector.load_persistent_snapshots()
+
+         # Store collector reference for agents added later
+         orchestrator._dashboard_collector = event_collector
+         # Store websocket manager for real-time event emission (Phase 1.2)
+         orchestrator._websocket_manager = websocket_manager
+         # Phase 5A: Set websocket manager on EventEmitter for dashboard updates
+         orchestrator._event_emitter.set_websocket_manager(websocket_manager)
+
+         # Phase 6+7: Set class-level WebSocket broadcast wrapper (dashboard mode)
+         async def _broadcast_wrapper(event):
+             """Isolated broadcast wrapper - no reference chain to orchestrator."""
+             return await websocket_manager.broadcast(event)
+
+         Agent._websocket_broadcast_global = _broadcast_wrapper
+
+         # Inject event collector into all existing agents
+         for agent in orchestrator._agents.values():
+             # Add dashboard collector with priority ordering handled by agent
+             agent._add_utilities([event_collector])
+
+         # Start dashboard launcher (npm process + browser)
+         launcher_kwargs: dict[str, Any] = {"port": port}
+         if dashboard_v2:
+             dashboard_pkg_dir = Path(__file__).parent.parent / "dashboard"
+             launcher_kwargs["frontend_dir"] = dashboard_pkg_dir.parent / "frontend_v2"
+             launcher_kwargs["static_dir"] = dashboard_pkg_dir / "static_v2"
+
+         launcher = DashboardLauncher(**launcher_kwargs)
+         launcher.start()
+
+         # Create dashboard HTTP service
+         service = DashboardHTTPService(
+             orchestrator=orchestrator,
+             websocket_manager=websocket_manager,
+             event_collector=event_collector,
+             use_v2=dashboard_v2,
+         )
+
+         # Store launcher for cleanup
+         orchestrator._dashboard_launcher = launcher
+
+         # Run service (blocking call)
+         # Note: Cleanup is NOT done here - it's handled by:
+         # - ServerManager.serve() finally block (blocking mode)
+         # - ServerManager._cleanup_server_callback() (non-blocking mode)
+         await service.run_async(host=host, port=port)
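ServerManager.serve() stores the background task on `orchestrator._server_task` "for later cancellation", but the shutdown side is not shown in this file. A short sketch of that path, using only standard asyncio cancellation semantics; the `main` wrapper and `my_message` are placeholders borrowed from the docstring example:

    import asyncio

    from flock.orchestrator.server_manager import ServerManager

    async def main(orchestrator) -> None:
        # Start the API in the background; serve() returns the Task handle.
        task = await ServerManager.serve(orchestrator, blocking=False)

        await orchestrator.publish(my_message)
        await orchestrator.run_until_idle()

        # Shut down: cancel the background task and absorb the CancelledError.
        # The done-callback installed by serve() then stops the dashboard
        # launcher (if any) and clears orchestrator._server_task.
        task.cancel()
        try:
            await task
        except asyncio.CancelledError:
            pass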
flock/orchestrator/tracing.py
@@ -0,0 +1,147 @@
+ """Unified tracing utilities for orchestrator workflows.
+
+ Handles OpenTelemetry workflow spans and trace database management.
+ Extracted from orchestrator.py to reduce complexity.
+ """
+
+ from __future__ import annotations
+
+ from contextlib import asynccontextmanager
+ from pathlib import Path
+ from typing import TYPE_CHECKING, Any
+
+ from opentelemetry import trace
+ from opentelemetry.trace import Status, StatusCode
+
+
+ if TYPE_CHECKING:
+     from collections.abc import AsyncGenerator
+
+
+ class TracingManager:
+     """Manages unified tracing for orchestrator workflows.
+
+     Provides workflow span creation and trace database cleanup utilities.
+     """
+
+     def __init__(self) -> None:
+         """Initialize tracing manager."""
+         self._workflow_span = None
+
+     @asynccontextmanager
+     async def traced_run(
+         self, name: str = "workflow", flock_id: str | None = None
+     ) -> AsyncGenerator[Any, None]:
+         """Context manager for wrapping an entire execution in a single unified trace.
+
+         This creates a parent span that encompasses all operations (publish, run_until_idle, etc.)
+         within the context, ensuring they all belong to the same trace_id for better observability.
+
+         Args:
+             name: Name for the workflow trace (default: "workflow")
+             flock_id: Optional Flock instance ID for attribution
+
+         Yields:
+             The workflow span for optional manual attribute setting
+
+         Examples:
+             # Explicit workflow tracing (recommended)
+             async with tracing_manager.traced_run("pizza_workflow"):
+                 await flock.publish(pizza_idea)
+                 await flock.run_until_idle()
+             # All operations now share the same trace_id!
+
+             # Custom attributes
+             async with tracing_manager.traced_run("data_pipeline") as span:
+                 span.set_attribute("pipeline.version", "2.0")
+                 await flock.publish(data)
+                 await flock.run_until_idle()
+         """
+         tracer = trace.get_tracer(__name__)
+         with tracer.start_as_current_span(name) as span:
+             # Set workflow-level attributes
+             span.set_attribute("flock.workflow", True)
+             span.set_attribute("workflow.name", name)
+             if flock_id:
+                 span.set_attribute("workflow.flock_id", flock_id)
+
+             # Store span for nested operations to use
+             prev_workflow_span = self._workflow_span
+             self._workflow_span = span
+
+             try:
+                 yield span
+                 span.set_status(Status(StatusCode.OK))
+             except Exception as e:
+                 span.set_status(Status(StatusCode.ERROR, str(e)))
+                 span.record_exception(e)
+                 raise
+             finally:
+                 # Restore previous workflow span
+                 self._workflow_span = prev_workflow_span
+
+     @property
+     def current_workflow_span(self) -> Any:
+         """Get the current workflow span (for nested operations)."""
+         return self._workflow_span
+
+     @staticmethod
+     def clear_traces(db_path: str = ".flock/traces.duckdb") -> dict[str, Any]:
+         """Clear all traces from the DuckDB database.
+
+         Useful for resetting debug sessions or cleaning up test data.
+
+         Args:
+             db_path: Path to the DuckDB database file (default: ".flock/traces.duckdb")
+
+         Returns:
+             Dictionary with operation results:
+             - deleted_count: Number of spans deleted
+             - success: Whether operation succeeded
+             - error: Error message if failed
+
+         Examples:
+             # Clear all traces
+             result = TracingManager.clear_traces()
+             print(f"Deleted {result['deleted_count']} spans")
+
+             # Custom database path
+             result = TracingManager.clear_traces(".flock/custom_traces.duckdb")
+
+             # Check if operation succeeded
+             if result['success']:
+                 print("Traces cleared successfully!")
+             else:
+                 print(f"Error: {result['error']}")
+         """
+         try:
+             import duckdb
+
+             db_file = Path(db_path)
+             if not db_file.exists():
+                 return {
+                     "success": False,
+                     "deleted_count": 0,
+                     "error": f"Database file not found: {db_path}",
+                 }
+
+             # Connect and clear
+             conn = duckdb.connect(str(db_file))
+             try:
+                 # Get count before deletion
+                 count_result = conn.execute("SELECT COUNT(*) FROM spans").fetchone()
+                 deleted_count = count_result[0] if count_result else 0
+
+                 # Delete all spans
+                 conn.execute("DELETE FROM spans")
+
+                 # Vacuum to reclaim space
+                 conn.execute("VACUUM")
+
+                 return {"success": True, "deleted_count": deleted_count, "error": None}
+
+             finally:
+                 conn.close()
+
+         except Exception as e:
+             return {"success": False, "deleted_count": 0, "error": str(e)}
flock/storage/__init__.py
@@ -0,0 +1,10 @@
+ """Storage backends for Flock blackboard."""
+
+ from flock.storage.sqlite.query_builder import SQLiteQueryBuilder
+ from flock.storage.sqlite.schema_manager import SQLiteSchemaManager
+
+
+ __all__ = [
+     "SQLiteQueryBuilder",
+     "SQLiteSchemaManager",
+ ]
flock/storage/artifact_aggregator.py
@@ -0,0 +1,158 @@
+ """Artifact aggregation utilities for summary statistics.
+
+ Handles aggregation logic for artifact collections, computing statistics
+ like type distribution, producer counts, and time ranges.
+ """
+
+ from __future__ import annotations
+
+ from datetime import datetime
+ from typing import Any
+
+ from flock.core.artifacts import Artifact
+ from flock.utils.time_utils import format_time_span
+
+
+ class ArtifactAggregator:
+     """
+     Aggregates artifact statistics for summary reports.
+
+     Provides clean separation of aggregation logic from storage implementations.
+     Each aggregation method is simple and focused.
+     """
+
+     def aggregate_by_type(self, artifacts: list[Artifact]) -> dict[str, int]:
+         """
+         Count artifacts by type.
+
+         Args:
+             artifacts: List of artifacts to aggregate
+
+         Returns:
+             Dict mapping type names to counts
+         """
+         by_type: dict[str, int] = {}
+         for artifact in artifacts:
+             by_type[artifact.type] = by_type.get(artifact.type, 0) + 1
+         return by_type
+
+     def aggregate_by_producer(self, artifacts: list[Artifact]) -> dict[str, int]:
+         """
+         Count artifacts by producer.
+
+         Args:
+             artifacts: List of artifacts to aggregate
+
+         Returns:
+             Dict mapping producer names to counts
+         """
+         by_producer: dict[str, int] = {}
+         for artifact in artifacts:
+             by_producer[artifact.produced_by] = (
+                 by_producer.get(artifact.produced_by, 0) + 1
+             )
+         return by_producer
+
+     def aggregate_by_visibility(self, artifacts: list[Artifact]) -> dict[str, int]:
+         """
+         Count artifacts by visibility kind.
+
+         Args:
+             artifacts: List of artifacts to aggregate
+
+         Returns:
+             Dict mapping visibility kinds to counts
+         """
+         by_visibility: dict[str, int] = {}
+         for artifact in artifacts:
+             kind = getattr(artifact.visibility, "kind", "Unknown")
+             by_visibility[kind] = by_visibility.get(kind, 0) + 1
+         return by_visibility
+
+     def aggregate_tags(self, artifacts: list[Artifact]) -> dict[str, int]:
+         """
+         Count tag occurrences across artifacts.
+
+         Args:
+             artifacts: List of artifacts to aggregate
+
+         Returns:
+             Dict mapping tag names to occurrence counts
+         """
+         tag_counts: dict[str, int] = {}
+         for artifact in artifacts:
+             for tag in artifact.tags:
+                 tag_counts[tag] = tag_counts.get(tag, 0) + 1
+         return tag_counts
+
+     def get_date_range(
+         self, artifacts: list[Artifact]
+     ) -> tuple[datetime | None, datetime | None]:
+         """
+         Find earliest and latest creation times.
+
+         Args:
+             artifacts: List of artifacts to analyze
+
+         Returns:
+             Tuple of (earliest, latest) datetimes, or (None, None) if empty
+         """
+         if not artifacts:
+             return None, None
+
+         earliest: datetime | None = None
+         latest: datetime | None = None
+
+         for artifact in artifacts:
+             if earliest is None or artifact.created_at < earliest:
+                 earliest = artifact.created_at
+             if latest is None or artifact.created_at > latest:
+                 latest = artifact.created_at
+
+         return earliest, latest
+
+     def build_summary(
+         self,
+         artifacts: list[Artifact],
+         total: int,
+         is_full_window: bool,
+     ) -> dict[str, Any]:
+         """
+         Build complete summary statistics for artifacts.
+
+         Args:
+             artifacts: List of artifacts to summarize
+             total: Total count (may differ from len(artifacts) if paginated)
+             is_full_window: Whether this represents all artifacts (no filters)
+
+         Returns:
+             Dictionary with complete summary statistics:
+             - total: Total artifact count
+             - by_type: Type distribution
+             - by_producer: Producer distribution
+             - by_visibility: Visibility distribution
+             - tag_counts: Tag occurrence counts
+             - earliest_created_at: ISO string of earliest artifact
+             - latest_created_at: ISO string of latest artifact
+             - is_full_window: Whether all artifacts included
+             - window_span_label: Human-readable time span
+         """
+         by_type = self.aggregate_by_type(artifacts)
+         by_producer = self.aggregate_by_producer(artifacts)
+         by_visibility = self.aggregate_by_visibility(artifacts)
+         tag_counts = self.aggregate_tags(artifacts)
+         earliest, latest = self.get_date_range(artifacts)
+
+         window_span_label = format_time_span(earliest, latest)
+
+         return {
+             "total": total,
+             "by_type": by_type,
+             "by_producer": by_producer,
+             "by_visibility": by_visibility,
+             "tag_counts": tag_counts,
+             "earliest_created_at": earliest.isoformat() if earliest else None,
+             "latest_created_at": latest.isoformat() if latest else None,
+             "is_full_window": is_full_window,
+             "window_span_label": window_span_label,
+         }
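A short usage sketch for the aggregator; `page` and `total_count` stand in for whatever a store returns for a filtered, paginated query, and the printed values are illustrative only:

    from flock.storage.artifact_aggregator import ArtifactAggregator

    aggregator = ArtifactAggregator()
    summary = aggregator.build_summary(page, total=total_count, is_full_window=False)

    summary["by_type"]              # e.g. {"PizzaOrder": 12, "Receipt": 9}
    summary["total"]                # total_count, not len(page)
    summary["earliest_created_at"]  # ISO 8601 string, or None for an empty page
    summary["window_span_label"]    # human-readable span from format_time_span()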
flock/storage/in_memory/__init__.py
@@ -0,0 +1,6 @@
+ """In-memory storage implementation utilities."""
+
+ from __future__ import annotations
+
+
+ __all__ = ["ArtifactFilter", "HistoryAggregator"]
flock/storage/in_memory/artifact_filter.py
@@ -0,0 +1,114 @@
+ """Artifact filtering utilities for in-memory storage.
+
+ Provides focused filtering logic for InMemoryBlackboardStore.query_artifacts.
+ Extracted from store.py to reduce complexity from B (10) to A (4).
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+
+ if TYPE_CHECKING:
+     from flock.core.artifacts import Artifact
+     from flock.core.store import FilterConfig
+
+
+ class ArtifactFilter:
+     """
+     Filter artifacts based on FilterConfig criteria.
+
+     Separates filtering logic from query orchestration for better
+     testability and reduced complexity.
+     """
+
+     def __init__(self, filters: FilterConfig):
+         """
+         Initialize filter with configuration.
+
+         Args:
+             filters: Filter configuration with optional criteria
+         """
+         from flock.registry import type_registry
+
+         # Pre-resolve canonical types once
+         self.canonical_types: set[str] | None = None
+         if filters.type_names:
+             self.canonical_types = {
+                 type_registry.resolve_name(name) for name in filters.type_names
+             }
+
+         self.produced_by = filters.produced_by or set()
+         self.correlation_id = filters.correlation_id
+         self.tags = filters.tags or set()
+         self.visibility_kinds = filters.visibility or set()
+         self.start = filters.start
+         self.end = filters.end
+
+     def matches(self, artifact: Artifact) -> bool:
+         """
+         Check if artifact matches all filter criteria.
+
+         Uses focused helper methods to keep complexity low (A-rated).
+         Each criterion is evaluated independently for clarity.
+
+         Args:
+             artifact: Artifact to check against filters
+
+         Returns:
+             True if artifact matches all criteria, False otherwise
+
+         Examples:
+             >>> filter = ArtifactFilter(FilterConfig(produced_by={"agent1"}))
+             >>> artifact = Artifact(type="Result", produced_by="agent1", ...)
+             >>> filter.matches(artifact)
+             True
+         """
+         return (
+             self._matches_type(artifact)
+             and self._matches_producer(artifact)
+             and self._matches_correlation(artifact)
+             and self._matches_tags(artifact)
+             and self._matches_visibility(artifact)
+             and self._matches_time_range(artifact)
+         )
+
+     def _matches_type(self, artifact: Artifact) -> bool:
+         """Check if artifact type matches filter."""
+         if not self.canonical_types:
+             return True
+         return artifact.type in self.canonical_types
+
+     def _matches_producer(self, artifact: Artifact) -> bool:
+         """Check if artifact producer matches filter."""
+         if not self.produced_by:
+             return True
+         return artifact.produced_by in self.produced_by
+
+     def _matches_correlation(self, artifact: Artifact) -> bool:
+         """Check if artifact correlation ID matches filter."""
+         if not self.correlation_id:
+             return True
+         if artifact.correlation_id is None:
+             return False
+         return str(artifact.correlation_id) == self.correlation_id
+
+     def _matches_tags(self, artifact: Artifact) -> bool:
+         """Check if artifact has all required tags."""
+         if not self.tags:
+             return True
+         return self.tags.issubset(artifact.tags)
+
+     def _matches_visibility(self, artifact: Artifact) -> bool:
+         """Check if artifact visibility kind matches filter."""
+         if not self.visibility_kinds:
+             return True
+         return artifact.visibility.kind in self.visibility_kinds
+
+     def _matches_time_range(self, artifact: Artifact) -> bool:
+         """Check if artifact creation time is within range."""
+         if self.start and artifact.created_at < self.start:
+             return False
+         if self.end and artifact.created_at > self.end:
+             return False
+         return True
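The constructor reads `type_names`, `produced_by`, `correlation_id`, `tags`, `visibility`, `start`, and `end` off the FilterConfig, so a config like the one below exercises the producer, tag, and time-range checks. A sketch only: keyword construction of FilterConfig, the datetime bounds, and the `artifacts` list are assumptions:

    from datetime import datetime, timezone

    from flock.core.store import FilterConfig
    from flock.storage.in_memory.artifact_filter import ArtifactFilter

    config = FilterConfig(
        produced_by={"pizza_agent"},
        tags={"final"},
        start=datetime(2025, 1, 1, tzinfo=timezone.utc),
        end=datetime(2025, 2, 1, tzinfo=timezone.utc),
    )
    artifact_filter = ArtifactFilter(config)

    # All set criteria must hold; unset criteria pass automatically.
    matching = [a for a in artifacts if artifact_filter.matches(a)]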