flock-core 0.5.9-py3-none-any.whl → 0.5.11-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of flock-core might be problematic.
Files changed (54)
  1. flock/agent.py +149 -62
  2. flock/api/themes.py +6 -2
  3. flock/api_models.py +285 -0
  4. flock/artifact_collector.py +6 -3
  5. flock/batch_accumulator.py +3 -1
  6. flock/cli.py +3 -1
  7. flock/components.py +45 -56
  8. flock/context_provider.py +531 -0
  9. flock/correlation_engine.py +8 -4
  10. flock/dashboard/collector.py +48 -29
  11. flock/dashboard/events.py +10 -4
  12. flock/dashboard/launcher.py +3 -1
  13. flock/dashboard/models/graph.py +9 -3
  14. flock/dashboard/service.py +187 -93
  15. flock/dashboard/websocket.py +17 -4
  16. flock/engines/dspy_engine.py +174 -98
  17. flock/engines/examples/simple_batch_engine.py +9 -3
  18. flock/examples.py +6 -2
  19. flock/frontend/src/services/indexeddb.test.ts +4 -4
  20. flock/frontend/src/services/indexeddb.ts +1 -1
  21. flock/helper/cli_helper.py +14 -1
  22. flock/logging/auto_trace.py +6 -1
  23. flock/logging/formatters/enum_builder.py +3 -1
  24. flock/logging/formatters/theme_builder.py +32 -17
  25. flock/logging/formatters/themed_formatter.py +38 -22
  26. flock/logging/logging.py +21 -7
  27. flock/logging/telemetry.py +9 -3
  28. flock/logging/telemetry_exporter/duckdb_exporter.py +27 -25
  29. flock/logging/trace_and_logged.py +14 -5
  30. flock/mcp/__init__.py +3 -6
  31. flock/mcp/client.py +49 -19
  32. flock/mcp/config.py +12 -6
  33. flock/mcp/manager.py +6 -2
  34. flock/mcp/servers/sse/flock_sse_server.py +9 -3
  35. flock/mcp/servers/streamable_http/flock_streamable_http_server.py +6 -2
  36. flock/mcp/tool.py +18 -6
  37. flock/mcp/types/handlers.py +3 -1
  38. flock/mcp/types/types.py +9 -3
  39. flock/orchestrator.py +449 -58
  40. flock/orchestrator_component.py +15 -5
  41. flock/patches/dspy_streaming_patch.py +12 -4
  42. flock/registry.py +9 -3
  43. flock/runtime.py +69 -18
  44. flock/service.py +135 -64
  45. flock/store.py +29 -10
  46. flock/subscription.py +6 -4
  47. flock/system_artifacts.py +33 -0
  48. flock/utilities.py +41 -13
  49. flock/utility/output_utility_component.py +31 -11
  50. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/METADATA +150 -26
  51. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/RECORD +54 -51
  52. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/WHEEL +0 -0
  53. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/entry_points.txt +0 -0
  54. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/licenses/LICENSE +0 -0
flock/store.py CHANGED
@@ -227,7 +227,9 @@ class InMemoryBlackboardStore(BlackboardStore):
         self._lock = Lock()
         self._by_id: dict[UUID, Artifact] = {}
         self._by_type: dict[str, list[Artifact]] = defaultdict(list)
-        self._consumptions_by_artifact: dict[UUID, list[ConsumptionRecord]] = defaultdict(list)
+        self._consumptions_by_artifact: dict[UUID, list[ConsumptionRecord]] = (
+            defaultdict(list)
+        )
         self._agent_snapshots: dict[str, AgentSnapshotRecord] = {}

     async def publish(self, artifact: Artifact) -> None:
@@ -254,7 +256,9 @@ class InMemoryBlackboardStore(BlackboardStore):
         artifacts = self._by_type.get(canonical, [])
         return [artifact_type(**artifact.payload) for artifact in artifacts]  # type: ignore

-    async def extend(self, artifacts: Iterable[Artifact]) -> None:  # pragma: no cover - helper
+    async def extend(
+        self, artifacts: Iterable[Artifact]
+    ) -> None:  # pragma: no cover - helper
         for artifact in artifacts:
             await self.publish(artifact)

@@ -280,7 +284,9 @@ class InMemoryBlackboardStore(BlackboardStore):
         filters = filters or FilterConfig()
         canonical: set[str] | None = None
         if filters.type_names:
-            canonical = {type_registry.resolve_name(name) for name in filters.type_names}
+            canonical = {
+                type_registry.resolve_name(name) for name in filters.type_names
+            }

         visibility_filter = filters.visibility or set()

@@ -347,7 +353,9 @@ class InMemoryBlackboardStore(BlackboardStore):
             if not isinstance(artifact, Artifact):
                 raise TypeError("Expected Artifact instance")
             by_type[artifact.type] = by_type.get(artifact.type, 0) + 1
-            by_producer[artifact.produced_by] = by_producer.get(artifact.produced_by, 0) + 1
+            by_producer[artifact.produced_by] = (
+                by_producer.get(artifact.produced_by, 0) + 1
+            )
             kind = getattr(artifact.visibility, "kind", "Unknown")
             by_visibility[kind] = by_visibility.get(kind, 0) + 1
             for tag in artifact.tags:
@@ -476,7 +484,9 @@ class SQLiteBlackboardStore(BlackboardStore):
             "version": artifact.version,
             "visibility": visibility_json,
             "tags": tags_json,
-            "correlation_id": str(artifact.correlation_id) if artifact.correlation_id else None,
+            "correlation_id": str(artifact.correlation_id)
+            if artifact.correlation_id
+            else None,
             "partition_key": artifact.partition_key,
             "created_at": created_at,
         }
@@ -816,7 +826,8 @@ class SQLiteBlackboardStore(BlackboardStore):
         by_visibility_rows = await cursor.fetchall()
         await cursor.close()
         by_visibility = {
-            (row["visibility_kind"] or "Unknown"): row["count"] for row in by_visibility_rows
+            (row["visibility_kind"] or "Unknown"): row["count"]
+            for row in by_visibility_rows
         }

         tag_query = f"""
@@ -839,7 +850,9 @@ class SQLiteBlackboardStore(BlackboardStore):
         cursor = await conn.execute(range_query, params_tuple)
         range_row = await cursor.fetchone()
         await cursor.close()
-        earliest = range_row["earliest"] if range_row and range_row["earliest"] else None
+        earliest = (
+            range_row["earliest"] if range_row and range_row["earliest"] else None
+        )
         latest = range_row["latest"] if range_row and range_row["latest"] else None

         return {
@@ -902,7 +915,9 @@ class SQLiteBlackboardStore(BlackboardStore):
         consumption_rows = await cursor.fetchall()
         await cursor.close()

-        consumed_by_type = {row["canonical_type"]: row["count"] for row in consumption_rows}
+        consumed_by_type = {
+            row["canonical_type"]: row["count"] for row in consumption_rows
+        }
         consumed_total = sum(consumed_by_type.values())

         return {
@@ -1156,7 +1171,9 @@ class SQLiteBlackboardStore(BlackboardStore):
         params: list[Any] = []

         if filters.type_names:
-            canonical = {type_registry.resolve_name(name) for name in filters.type_names}
+            canonical = {
+                type_registry.resolve_name(name) for name in filters.type_names
+            }
            placeholders = ", ".join("?" for _ in canonical)
            conditions.append(f"{prefix}canonical_type IN ({placeholders})")
            params.extend(sorted(canonical))
@@ -1172,7 +1189,9 @@ class SQLiteBlackboardStore(BlackboardStore):

         if filters.visibility:
             placeholders = ", ".join("?" for _ in filters.visibility)
-            conditions.append(f"json_extract({prefix}visibility, '$.kind') IN ({placeholders})")
+            conditions.append(
+                f"json_extract({prefix}visibility, '$.kind') IN ({placeholders})"
+            )
             params.extend(sorted(filters.visibility))

         if filters.start is not None:
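These store.py changes are line-length reflows, but they make the SQL assembly easier to read: type and visibility filters become parameterized `IN` clauses. A standalone sketch of the same construction, following the names in the hunks above (`prefix` would be a table alias; `resolve_name` is omitted for the sketch):

```python
# Illustrative reconstruction of the WHERE-clause assembly shown in the
# SQLiteBlackboardStore hunks above; not the actual store code.
from typing import Any


def build_conditions(
    type_names: set[str], visibility: set[str], prefix: str = ""
) -> tuple[list[str], list[Any]]:
    conditions: list[str] = []
    params: list[Any] = []
    if type_names:
        placeholders = ", ".join("?" for _ in type_names)
        conditions.append(f"{prefix}canonical_type IN ({placeholders})")
        params.extend(sorted(type_names))
    if visibility:
        placeholders = ", ".join("?" for _ in visibility)
        conditions.append(
            f"json_extract({prefix}visibility, '$.kind') IN ({placeholders})"
        )
        params.extend(sorted(visibility))
    return conditions, params


conds, params = build_conditions({"Report"}, {"Public"}, prefix="a.")
# conds -> ["a.canonical_type IN (?)",
#           "json_extract(a.visibility, '$.kind') IN (?)"]
```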
flock/subscription.py CHANGED
@@ -102,7 +102,7 @@ class Subscription:
         where: Sequence[Predicate] | None = None,
         text_predicates: Sequence[TextPredicate] | None = None,
         from_agents: Iterable[str] | None = None,
-        channels: Iterable[str] | None = None,
+        tags: Iterable[str] | None = None,
         join: JoinSpec | None = None,
         batch: BatchSpec | None = None,
         delivery: str = "exclusive",
@@ -116,7 +116,9 @@ class Subscription:

         # Register all types and build counts (supports duplicates for count-based AND gates)
         type_name_list = [type_registry.register(t) for t in types]
-        self.type_names: set[str] = set(type_name_list)  # Unique type names (for matching)
+        self.type_names: set[str] = set(
+            type_name_list
+        )  # Unique type names (for matching)

         # Count-based AND gate: Track how many of each type are required
         # Example: .consumes(A, A, B) → {"TypeA": 2, "TypeB": 1}
@@ -127,7 +129,7 @@ class Subscription:
         self.where = list(where or [])
         self.text_predicates = list(text_predicates or [])
         self.from_agents = set(from_agents or [])
-        self.channels = set(channels or [])
+        self.tags = set(tags or [])
         self.join = join
         self.batch = batch
         self.delivery = delivery
@@ -145,7 +147,7 @@ class Subscription:
             return False
         if self.from_agents and artifact.produced_by not in self.from_agents:
             return False
-        if self.channels and not artifact.tags.intersection(self.channels):
+        if self.tags and not artifact.tags.intersection(self.tags):
             return False

         # Evaluate where predicates on typed payloads
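The `channels` → `tags` rename is a breaking change for any call site that passed `channels=`, but the semantics are unchanged: an empty filter matches everything, otherwise any overlap with the artifact's tags passes. A minimal sketch of that rule, distilled from the `matches` logic above (the `Artifact` dataclass below is an illustrative stand-in, not the real class):

```python
# Sketch of the renamed tag filter from Subscription.matches; the Artifact
# stand-in carries only the fields the check touches.
from dataclasses import dataclass, field


@dataclass
class Artifact:
    produced_by: str
    tags: set[str] = field(default_factory=set)


def tags_match(subscription_tags: set[str], artifact: Artifact) -> bool:
    # An empty filter matches everything; otherwise any shared tag passes.
    if subscription_tags and not artifact.tags.intersection(subscription_tags):
        return False
    return True


assert tags_match(set(), Artifact("writer", {"urgent"}))        # no filter
assert tags_match({"urgent"}, Artifact("writer", {"urgent"}))   # overlap
assert not tags_match({"urgent"}, Artifact("writer", {"low"}))  # disjoint
```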
flock/system_artifacts.py ADDED
@@ -0,0 +1,33 @@
+"""System-level artifact types published by the Flock orchestrator.
+
+These artifacts provide workflow telemetry and error tracking.
+"""
+
+from datetime import datetime
+
+from pydantic import BaseModel, Field
+
+from flock.registry import flock_type
+
+
+@flock_type
+class WorkflowError(BaseModel):
+    """Error artifact published when an agent execution fails.
+
+    This artifact is automatically published by the orchestrator when an agent
+    raises an exception during execution. It includes the correlation_id to enable
+    error tracking for workflows.
+
+    The workflow continues execution for other branches even when this is published.
+    """
+
+    failed_agent: str = Field(description="Name of the agent that failed")
+    error_type: str = Field(description="Type of exception that occurred")
+    error_message: str = Field(description="Error message from the exception")
+    timestamp: datetime = Field(description="When the error occurred")
+    task_id: str | None = Field(
+        default=None, description="Task ID of the failed execution"
+    )
+
+
+__all__ = ["WorkflowError"]
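Because `WorkflowError` is registered with `@flock_type`, ordinary agents can subscribe to it like any other artifact. A hedged sketch of an error-handling agent using the `.agent()/.consumes()/.publishes()` chain from the README; the `Alert` type and the agent name are illustrative, not part of this release:

```python
# Hypothetical error-escalation agent reacting to the new WorkflowError
# artifact; Alert and "escalator" are illustrative names.
from pydantic import BaseModel

from flock import Flock
from flock.registry import flock_type
from flock.system_artifacts import WorkflowError


@flock_type
class Alert(BaseModel):
    summary: str


flock = Flock("openai/gpt-4.1")

escalator = (
    flock.agent("escalator")
    .consumes(WorkflowError)  # fires whenever the orchestrator reports a failure
    .publishes(Alert)
)
```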
flock/utilities.py CHANGED
@@ -31,7 +31,9 @@ class MetricsUtility(AgentComponent):

     name: str | None = "metrics"

-    async def on_pre_evaluate(self, agent, ctx: Context, inputs: EvalInputs) -> EvalInputs:
+    async def on_pre_evaluate(
+        self, agent, ctx: Context, inputs: EvalInputs
+    ) -> EvalInputs:
         ctx.state.setdefault("metrics", {})[f"{agent.name}:start"] = time.perf_counter()
         return inputs

@@ -42,7 +44,9 @@ class MetricsUtility(AgentComponent):
         start = metrics.get(f"{agent.name}:start")
         if start:
             metrics[f"{agent.name}:duration_ms"] = (time.perf_counter() - start) * 1000
-        result.metrics.update({k: v for k, v in metrics.items() if k.endswith("duration_ms")})
+        result.metrics.update({
+            k: v for k, v in metrics.items() if k.endswith("duration_ms")
+        })
         return result


@@ -78,11 +82,15 @@ class LoggingUtility(AgentComponent):

     async def on_pre_consume(self, agent, ctx: Context, inputs: list[Any]):
         summary = ", ".join(self._summarize_artifact(art) for art in inputs) or "<none>"
-        self._console.log(f"[{agent.name}] consume n={len(inputs)} artifacts -> {summary}")
+        self._console.log(
+            f"[{agent.name}] consume n={len(inputs)} artifacts -> {summary}"
+        )
         self._render_artifacts(agent.name, inputs, role="input")
         return await super().on_pre_consume(agent, ctx, inputs)

-    async def on_pre_evaluate(self, agent, ctx: Context, inputs: EvalInputs) -> EvalInputs:
+    async def on_pre_evaluate(
+        self, agent, ctx: Context, inputs: EvalInputs
+    ) -> EvalInputs:
         if self._stream_tokens:
             self._maybe_start_stream(agent, ctx)
         return await super().on_pre_evaluate(agent, ctx, inputs)
@@ -91,7 +99,9 @@ class LoggingUtility(AgentComponent):
         self, agent, ctx: Context, inputs: EvalInputs, result: EvalResult
     ) -> EvalResult:
         self._render_metrics(agent.name, result.metrics)
-        self._render_artifacts(agent.name, result.artifacts or inputs.artifacts, role="output")
+        self._render_artifacts(
+            agent.name, result.artifacts or inputs.artifacts, role="output"
+        )
         if result.logs:
             self._render_logs(agent.name, result.logs)
         awaited = await super().on_post_evaluate(agent, ctx, inputs, result)
@@ -102,7 +112,9 @@ class LoggingUtility(AgentComponent):
     async def on_post_publish(self, agent, ctx: Context, artifact):
         visibility = getattr(artifact.visibility, "kind", "Public")
         subtitle = f"visibility={visibility}"
-        panel = self._build_artifact_panel(artifact, role="published", subtitle=subtitle)
+        panel = self._build_artifact_panel(
+            artifact, role="published", subtitle=subtitle
+        )
         self._console.print(panel)
         await super().on_post_publish(agent, ctx, artifact)

@@ -121,7 +133,9 @@ class LoggingUtility(AgentComponent):
     # ------------------------------------------------------------------
     # Rendering helpers

-    def _render_artifacts(self, agent_name: str, artifacts: Sequence[Any], *, role: str) -> None:
+    def _render_artifacts(
+        self, agent_name: str, artifacts: Sequence[Any], *, role: str
+    ) -> None:
         for artifact in artifacts:
             panel = self._build_artifact_panel(artifact, role=role)
             self._console.print(panel)
@@ -189,7 +203,9 @@ class LoggingUtility(AgentComponent):
             else:
                 textual.append(line)
         for payload in json_sections:
-            panel = Panel(payload, title=f"{agent_name} ▸ dspy.output", border_style="green")
+            panel = Panel(
+                payload, title=f"{agent_name} ▸ dspy.output", border_style="green"
+            )
             self._console.print(panel)
         if textual:
             body = Text("\n".join(textual) + "\n")
@@ -244,7 +260,9 @@ class LoggingUtility(AgentComponent):
         with contextlib.suppress(asyncio.CancelledError):
             await task

-    async def _consume_stream(self, agent_name: str, stream_key: str, queue: asyncio.Queue) -> None:
+    async def _consume_stream(
+        self, agent_name: str, stream_key: str, queue: asyncio.Queue
+    ) -> None:
         body = Text()
         live: Live | None = None
         try:
@@ -254,7 +272,9 @@ class LoggingUtility(AgentComponent):
                     break
                 kind = event.get("kind")
                 if live is None:
-                    live_panel = Panel(body, title=f"{agent_name} ▸ streaming", border_style="cyan")
+                    live_panel = Panel(
+                        body, title=f"{agent_name} ▸ streaming", border_style="cyan"
+                    )
                     live = Live(
                         live_panel,
                         console=self._console,
@@ -274,19 +294,27 @@ class LoggingUtility(AgentComponent):
                     message = event.get("message") or ""
                     body.append(f"\n⚠ {message}\n", style="bold red")
                 if live is not None:
-                    live.update(Panel(body, title=f"{agent_name} ▸ streaming", border_style="cyan"))
+                    live.update(
+                        Panel(
+                            body, title=f"{agent_name} ▸ streaming", border_style="cyan"
+                        )
+                    )
         finally:
             if live is not None:
                 live.__exit__(None, None, None)
             if body.plain:
                 self._console.print(
-                    Panel(body, title=f"{agent_name} ▸ stream transcript", border_style="cyan")
+                    Panel(
+                        body, title=f"{agent_name} ▸ stream transcript", border_style="cyan"
+                    )
                 )

     def _stream_key(self, agent, ctx: Context) -> str:
         return f"{ctx.task_id}:{agent.name}"

-    def _attach_stream_queue(self, state: MutableMapping[str, Any], queue: asyncio.Queue) -> None:
+    def _attach_stream_queue(
+        self, state: MutableMapping[str, Any], queue: asyncio.Queue
+    ) -> None:
         state.setdefault("_logging", {})["stream_queue"] = queue

     def _detach_stream_queue(self, state: MutableMapping[str, Any]) -> None:
flock/utility/output_utility_component.py CHANGED
@@ -28,8 +28,12 @@ class OutputUtilityConfig(AgentComponentConfig):
     theme: OutputTheme = Field(
         default=OutputTheme.catppuccin_mocha, description="Theme for output formatting"
     )
-    render_table: bool = Field(default=True, description="Whether to render output as a table")
-    max_length: int = Field(default=1000, description="Maximum length for displayed output")
+    render_table: bool = Field(
+        default=True, description="Whether to render output as a table"
+    )
+    max_length: int = Field(
+        default=1000, description="Maximum length for displayed output"
+    )
     truncate_long_values: bool = Field(
         default=True, description="Whether to truncate long values in display"
     )
@@ -61,7 +65,9 @@ class OutputUtilityComponent(AgentComponent):
         default_factory=OutputUtilityConfig, description="Output configuration"
     )

-    def __init__(self, name: str = "output", config: OutputUtilityConfig | None = None, **data):
+    def __init__(
+        self, name: str = "output", config: OutputUtilityConfig | None = None, **data
+    ):
         if config is None:
             config = OutputUtilityConfig()
         super().__init__(name=name, config=config, **data)
@@ -96,7 +102,11 @@ class OutputUtilityComponent(AgentComponent):
         items = []
         prefix = " " * indent
         for key, value in d.items():
-            if self.config.truncate_long_values and isinstance(value, str) and len(value) > 100:
+            if (
+                self.config.truncate_long_values
+                and isinstance(value, str)
+                and len(value) > 100
+            ):
                 value = value[:97] + "..."
             formatted_value = self._format_value(value, key)
             items.append(f"{prefix} {key}: {formatted_value}")
@@ -125,7 +135,9 @@ class OutputUtilityComponent(AgentComponent):
             return f"[CODE:{language}]\n{code}\n[/CODE]"

         # Replace markdown-style code blocks
-        return re.sub(r"```(\w+)?\n(.*?)\n```", replace_code_block, text, flags=re.DOTALL)
+        return re.sub(
+            r"```(\w+)?\n(.*?)\n```", replace_code_block, text, flags=re.DOTALL
+        )

     async def on_post_evaluate(
         self, agent: "Agent", ctx: Context, inputs: EvalInputs, result: EvalResult
@@ -138,7 +150,9 @@ class OutputUtilityComponent(AgentComponent):
         streamed_artifact_id = None

         if ctx:
-            streaming_live_handled = bool(ctx.get_variable("_flock_stream_live_active", False))
+            streaming_live_handled = bool(
+                ctx.get_variable("_flock_stream_live_active", False)
+            )
             output_queued = bool(ctx.get_variable("_flock_output_queued", False))
             streamed_artifact_id = ctx.get_variable("_flock_streamed_artifact_id")

@@ -162,20 +176,24 @@ class OutputUtilityComponent(AgentComponent):

         # Skip output if streaming already handled it (and no ID to update)
         if streaming_live_handled:
-            logger.debug("Skipping static table because streaming rendered live output.")
+            logger.debug(
+                "Skipping static table because streaming rendered live output."
+            )
             return result

         # If output was queued due to concurrent stream, wait and then display
         if output_queued:
             # Wait for active streams to complete
-            orchestrator = getattr(ctx, "orchestrator", None)
-            if orchestrator:
+            # Phase 6+7 Security Fix: Use Agent class variable instead of ctx.state
+            if ctx:
                 import asyncio

+                from flock.agent import Agent
+
                 # Wait until no streams are active
                 max_wait = 30  # seconds
                 waited = 0
-                while getattr(orchestrator, "_active_streams", 0) > 0 and waited < max_wait:
+                while Agent._streaming_counter > 0 and waited < max_wait:
                     await asyncio.sleep(0.1)
                     waited += 0.1
                 logger.debug(
@@ -189,7 +207,9 @@ class OutputUtilityComponent(AgentComponent):
         try:
             # Create a copy or select relevant parts to avoid modifying original result dict directly
             display_result = result.copy()
-            display_result["context_snapshot"] = ctx.to_dict()  # Potential performance hit
+            display_result["context_snapshot"] = (
+                ctx.to_dict()
+            )  # Potential performance hit
         except Exception:
             display_result = result.copy()
             display_result["context_snapshot"] = "[Error serializing context]"
{flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: flock-core
-Version: 0.5.9
+Version: 0.5.11
 Summary: Flock: A declrative framework for building and orchestrating AI agents.
 Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
 License: MIT
@@ -37,7 +37,7 @@ Description-Content-Type: text/markdown
 <p align="center">
   <a href="https://whiteducksoftware.github.io/flock/" target="_blank"><img alt="Documentation" src="https://img.shields.io/badge/docs-online-blue?style=for-the-badge&logo=readthedocs"></a>
   <a href="https://pypi.org/project/flock-core/" target="_blank"><img alt="PyPI Version" src="https://img.shields.io/pypi/v/flock-core?style=for-the-badge&logo=pypi&label=pip%20version"></a>
-  <img alt="Python Version" src="https://img.shields.io/badge/python-3.10%2B-blue?style=for-the-badge&logo=python">
+  <img alt="Python Version" src="https://img.shields.io/badge/python-3.12%2B-blue?style=for-the-badge&logo=python">
   <a href="LICENSE" target="_blank"><img alt="License" src="https://img.shields.io/github/license/whiteducksoftware/flock?style=for-the-badge"></a>
   <a href="https://whiteduck.de" target="_blank"><img alt="Built by white duck" src="https://img.shields.io/badge/Built%20by-white%20duck%20GmbH-white?style=for-the-badge&labelColor=black"></a>
   <a href="https://codecov.io/gh/whiteducksoftware/flock" target="_blank"><img alt="Test Coverage" src="https://codecov.io/gh/whiteducksoftware/flock/branch/main/graph/badge.svg?token=YOUR_TOKEN_HERE&style=for-the-badge"></a>
@@ -287,30 +287,6 @@ asyncio.run(main())

 ---

-## Persistent Blackboard History
-
-The in-memory store is still great for local tinkering, but production teams now have a durable option. Plugging in `SQLiteBlackboardStore` turns the blackboard into a persistent event log with first-class ergonomics:
-
-- **Long-lived artifacts** — every field (payload, tags, partition keys, visibility) is stored for replay, audits, and postmortems
-- **Historical APIs** — `/api/v1/artifacts`, `/summary`, and `/agents/{agent_id}/history-summary` expose pagination, filtering, and consumption counts
-- **Dashboard module** — the new **Historical Blackboard** experience preloads persisted history, enriches the graph with consumer metadata, and highlights retention windows
-- **Operational tooling** — CLI helpers (`init-sqlite-store`, `sqlite-maintenance --delete-before ... --vacuum`) make schema setup and retention policies scriptable
-
-Quick start:
-
-```python
-from flock import Flock
-from flock.store import SQLiteBlackboardStore
-
-store = SQLiteBlackboardStore(".flock/blackboard.db")
-await store.ensure_schema()
-flock = Flock("openai/gpt-4.1", store=store)
-```
-
-Run `examples/02-the-blackboard/01_persistent_pizza.py` to generate history, then launch `examples/03-the-dashboard/04_persistent_pizza_dashboard.py` and explore previous runs, consumption trails, and retention banners inside the dashboard.
-
----
-
 ## Core Concepts

 ### Typed Artifacts (The Vocabulary)
@@ -541,8 +517,127 @@ artifact.visibility = AfterVisibility(ttl=timedelta(hours=24), then=PublicVisibility())
 agent.publishes(PublicReport, visibility=PublicVisibility())
 ```

+**Visibility has a dual purpose:** It controls both which agents can be **triggered** by an artifact AND which artifacts agents can **see** in their context. This ensures consistent security across agent execution and data access—agents cannot bypass visibility controls through subscription filters or context providers.
+
 **Why this matters:** Financial services, healthcare, defense, SaaS platforms all need this for compliance. Other frameworks make you build it yourself.

+---
+
+### 🔒 Architecturally Impossible to Bypass Security
+
+**Here's what makes Flock different:** In most frameworks, security is something you remember to add. In Flock, **it's architecturally impossible to forget.**
+
+Every context provider in Flock inherits from `BaseContextProvider`, which enforces visibility filtering **automatically**. You literally cannot create a provider that forgets to check permissions—the security logic is baked into the base class and executes before your custom code even runs.
+
+**What this means in practice:**
+
+```python
+# ❌ Other frameworks: Security is your responsibility (easy to forget!)
+class MyProvider:
+    async def get_context(self, agent):
+        artifacts = store.get_all()  # OOPS! Forgot to check visibility!
+        return artifacts  # 🔥 Security vulnerability
+
+# ✅ Flock: Security is enforced automatically (impossible to bypass!)
+class MyProvider(BaseContextProvider):
+    async def get_artifacts(self, request):
+        artifacts = await store.query_artifacts(...)
+        return artifacts  # ✨ Visibility filtering happens automatically!
+        # BaseContextProvider calls .visibility.allows() for you
+        # You CANNOT bypass this - it's enforced by the architecture
+```
+
+**Built-in providers (all inherit BaseContextProvider):**
+- `DefaultContextProvider` - Full blackboard access (visibility-filtered)
+- `CorrelatedContextProvider` - Workflow isolation (visibility-filtered)
+- `RecentContextProvider` - Token cost control (visibility-filtered)
+- `TimeWindowContextProvider` - Time-based filtering (visibility-filtered)
+- `EmptyContextProvider` - Stateless agents (zero context)
+- `FilteredContextProvider` - Custom filtering (visibility-filtered)
+
+**Every single one enforces visibility automatically. Zero chance of accidentally leaking data.**
+
+This isn't just convenient—it's **security by design**. When you're building HIPAA-compliant healthcare systems or SOC2-certified SaaS platforms, "impossible to bypass even by accident" is the only acceptable standard.
+
+---
+
+### Context Providers (The Smart Filter)
+
+**Control what agents see with custom Context Providers:**
+
+```python
+from flock.context_provider import FilteredContextProvider, PasswordRedactorProvider
+from flock.store import FilterConfig
+
+# Global filtering - all agents see only urgent items
+flock = Flock(
+    "openai/gpt-4.1",
+    context_provider=FilteredContextProvider(FilterConfig(tags={"urgent"}))
+)
+
+# Per-agent overrides - specialized context per agent
+error_agent = flock.agent("errors").consumes(Log).publishes(Alert)
+error_agent.context_provider = FilteredContextProvider(FilterConfig(tags={"ERROR"}))
+
+# Production-ready password filtering
+from examples.context_provider import PasswordRedactorProvider
+flock = Flock(
+    "openai/gpt-4.1",
+    context_provider=PasswordRedactorProvider()  # Auto-redacts sensitive data!
+)
+```
+
+**What just happened:**
+- ✅ **Filtered context** - Agents see only relevant artifacts (save tokens, improve performance)
+- ✅ **Security boundary** - Visibility enforcement + custom filtering (mandatory, cannot bypass)
+- ✅ **Sensitive data protection** - Auto-redact passwords, API keys, credit cards, SSN, JWT tokens
+- ✅ **Per-agent specialization** - Different agents, different context rules
+
+**Production patterns:**
+```python
+# Password/secret redaction (copy-paste ready!)
+provider = PasswordRedactorProvider(
+    custom_patterns={"internal_id": r"ID-\d{6}"},
+    redaction_text="[REDACTED]"
+)
+
+# Role-based access control
+junior_agent.context_provider = FilteredContextProvider(FilterConfig(tags={"ERROR"}))
+senior_agent.context_provider = FilteredContextProvider(FilterConfig(tags={"ERROR", "WARN"}))
+admin_agent.context_provider = None  # See everything (uses default)
+
+# Multi-tenant isolation
+agent.context_provider = FilteredContextProvider(
+    FilterConfig(tags={"tenant:customer_123"})
+)
+```
+
+**Why this matters:** Reduce token costs (90%+ with smart filtering), protect sensitive data (auto-redact secrets), improve performance (agents see only what they need).
+
+**📖 [Learn more: Context Providers Guide](https://whiteducksoftware.github.io/flock/guides/context-providers/) | [Steal production code →](examples/08-context-provider/)**
+
+### Persistent Blackboard History
+
+The in-memory store is great for local development, but production teams need durability. The `SQLiteBlackboardStore` turns the blackboard into a persistent event log with first-class ergonomics:
+
+**What you get:**
+- **Long-lived artifacts** — Every field (payload, tags, partition keys, visibility) stored for replay, audits, and postmortems
+- **Historical APIs** — `/api/v1/artifacts`, `/summary`, and `/agents/{agent_id}/history-summary` expose pagination, filtering, and consumption counts
+- **Dashboard integration** — The **Historical Blackboard** view preloads persisted history, enriches the graph with consumer metadata, and highlights retention windows
+- **Operational tooling** — CLI helpers (`init-sqlite-store`, `sqlite-maintenance --delete-before ... --vacuum`) make schema setup and retention policies scriptable
+
+**Quick start:**
+```python
+from flock import Flock
+from flock.store import SQLiteBlackboardStore
+
+store = SQLiteBlackboardStore(".flock/blackboard.db")
+await store.ensure_schema()
+flock = Flock("openai/gpt-4.1", store=store)
+```
+
+**Try it:** Run `examples/02-the-blackboard/01_persistent_pizza.py` to generate history, then launch `examples/03-the-dashboard/04_persistent_pizza_dashboard.py` to explore previous runs, consumption trails, and retention banners.
+
 ### Batching Pattern: Parallel Execution Control

 **A key differentiator:** The separation of `publish()` and `run_until_idle()` enables parallel execution.
@@ -670,6 +765,35 @@ agent.best_of(150, ...) # ⚠️ Warns: "best_of(150) is very high - high LLM c

 ## Production-Ready Observability

+### Sophisticated REST API
+
+**Production-ready HTTP endpoints with comprehensive OpenAPI documentation:**
+
+Flock includes a fully-featured REST API for programmatic access to the blackboard, agents, and workflow orchestration. Perfect for integration with external systems, building custom UIs, or monitoring production deployments.
+
+**Key endpoints:**
+- `POST /api/v1/artifacts` - Publish artifacts to the blackboard
+- `GET /api/v1/artifacts` - Query artifacts with filtering, pagination, and consumption metadata
+- `POST /api/v1/agents/{name}/run` - Direct agent invocation
+- `GET /api/v1/correlations/{correlation_id}/status` - Workflow completion tracking
+- `GET /api/v1/agents` - List all registered agents with subscriptions
+- `GET /health` and `GET /metrics` - Production monitoring
+
+**Start the API server:**
+```python
+await flock.serve(dashboard=True)  # API + Dashboard on port 8344
+# API docs: http://localhost:8344/docs
+```
+
+**Features:**
+- ✅ **OpenAPI 3.0** - Interactive documentation at `/docs`
+- ✅ **Pydantic validation** - Type-safe request/response models
+- ✅ **Correlation tracking** - Monitor workflow completion with polling
+- ✅ **Consumption metadata** - Full artifact lineage and agent execution trails
+- ✅ **Production monitoring** - Health checks and Prometheus-compatible metrics
+
+**📖 [Explore the API →](http://localhost:8344/docs)** (start the server first!)
+
 ### Real-Time Dashboard

 **Start the dashboard with one line:**
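The endpoint list in the new REST API section implies a publish-then-poll integration pattern. A sketch with `httpx`; the JSON field names are assumptions, not confirmed by this diff, so check the generated OpenAPI docs at `/docs` on a running server for the real request and response models:

```python
# Hypothetical client for the endpoints listed in the README section above;
# body and response schemas are assumptions.
import httpx

BASE_URL = "http://localhost:8344"


def publish_and_poll(artifact: dict, correlation_id: str) -> dict:
    with httpx.Client(base_url=BASE_URL) as client:
        # Publish an artifact to the blackboard.
        client.post("/api/v1/artifacts", json=artifact).raise_for_status()
        # Poll workflow completion for the correlation id.
        response = client.get(f"/api/v1/correlations/{correlation_id}/status")
        response.raise_for_status()
        return response.json()
```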