flock-core 0.5.9__py3-none-any.whl → 0.5.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of flock-core might be problematic. Click here for more details.

Files changed (52):
  1. flock/agent.py +149 -62
  2. flock/api/themes.py +6 -2
  3. flock/artifact_collector.py +6 -3
  4. flock/batch_accumulator.py +3 -1
  5. flock/cli.py +3 -1
  6. flock/components.py +45 -56
  7. flock/context_provider.py +531 -0
  8. flock/correlation_engine.py +8 -4
  9. flock/dashboard/collector.py +48 -29
  10. flock/dashboard/events.py +10 -4
  11. flock/dashboard/launcher.py +3 -1
  12. flock/dashboard/models/graph.py +9 -3
  13. flock/dashboard/service.py +143 -72
  14. flock/dashboard/websocket.py +17 -4
  15. flock/engines/dspy_engine.py +174 -98
  16. flock/engines/examples/simple_batch_engine.py +9 -3
  17. flock/examples.py +6 -2
  18. flock/frontend/src/services/indexeddb.test.ts +4 -4
  19. flock/frontend/src/services/indexeddb.ts +1 -1
  20. flock/helper/cli_helper.py +14 -1
  21. flock/logging/auto_trace.py +6 -1
  22. flock/logging/formatters/enum_builder.py +3 -1
  23. flock/logging/formatters/theme_builder.py +32 -17
  24. flock/logging/formatters/themed_formatter.py +38 -22
  25. flock/logging/logging.py +21 -7
  26. flock/logging/telemetry.py +9 -3
  27. flock/logging/telemetry_exporter/duckdb_exporter.py +27 -25
  28. flock/logging/trace_and_logged.py +14 -5
  29. flock/mcp/__init__.py +3 -6
  30. flock/mcp/client.py +49 -19
  31. flock/mcp/config.py +12 -6
  32. flock/mcp/manager.py +6 -2
  33. flock/mcp/servers/sse/flock_sse_server.py +9 -3
  34. flock/mcp/servers/streamable_http/flock_streamable_http_server.py +6 -2
  35. flock/mcp/tool.py +18 -6
  36. flock/mcp/types/handlers.py +3 -1
  37. flock/mcp/types/types.py +9 -3
  38. flock/orchestrator.py +204 -50
  39. flock/orchestrator_component.py +15 -5
  40. flock/patches/dspy_streaming_patch.py +12 -4
  41. flock/registry.py +9 -3
  42. flock/runtime.py +69 -18
  43. flock/service.py +19 -6
  44. flock/store.py +29 -10
  45. flock/subscription.py +6 -4
  46. flock/utilities.py +41 -13
  47. flock/utility/output_utility_component.py +31 -11
  48. {flock_core-0.5.9.dist-info → flock_core-0.5.10.dist-info}/METADATA +132 -2
  49. {flock_core-0.5.9.dist-info → flock_core-0.5.10.dist-info}/RECORD +52 -51
  50. {flock_core-0.5.9.dist-info → flock_core-0.5.10.dist-info}/WHEEL +0 -0
  51. {flock_core-0.5.9.dist-info → flock_core-0.5.10.dist-info}/entry_points.txt +0 -0
  52. {flock_core-0.5.9.dist-info → flock_core-0.5.10.dist-info}/licenses/LICENSE +0 -0
flock/runtime.py CHANGED
@@ -5,7 +5,7 @@ from __future__ import annotations
5
5
  from typing import Any
6
6
  from uuid import UUID
7
7
 
8
- from pydantic import BaseModel, Field
8
+ from pydantic import BaseModel, ConfigDict, Field
9
9
 
10
10
  from flock.artifacts import Artifact
11
11
 
@@ -28,7 +28,9 @@ class EvalInputs(BaseModel):
28
28
 
29
29
  Example:
30
30
  >>> class TaskProcessor(EngineComponent):
31
- ... async def evaluate(self, agent, ctx, inputs: EvalInputs) -> EvalResult:
31
+ ... async def evaluate(
32
+ ... self, agent, ctx, inputs: EvalInputs
33
+ ... ) -> EvalResult:
32
34
  ... task = inputs.first_as(Task)
33
35
  ... if not task:
34
36
  ... return EvalResult.empty()
@@ -88,9 +90,13 @@ class EvalResult(BaseModel):
88
90
 
89
91
  Example:
90
92
  >>> class TaskProcessor(EngineComponent):
91
- ... async def evaluate(self, agent, ctx, inputs: EvalInputs) -> EvalResult:
93
+ ... async def evaluate(
94
+ ... self, agent, ctx, inputs: EvalInputs
95
+ ... ) -> EvalResult:
92
96
  ... task = inputs.first_as(Task)
93
- ... processed = Task(name=f"Done: {task.name}", priority=task.priority)
97
+ ... processed = Task(
98
+ ... name=f"Done: {task.name}", priority=task.priority
99
+ ... )
94
100
  ... return EvalResult.from_object(processed, agent=agent)
95
101
  """
96
102
  from flock.artifacts import Artifact
@@ -136,14 +142,16 @@ class EvalResult(BaseModel):
136
142
 
137
143
  Example:
138
144
  >>> class MovieEngine(EngineComponent):
139
- ... async def evaluate(self, agent, ctx, inputs: EvalInputs) -> EvalResult:
145
+ ... async def evaluate(
146
+ ... self, agent, ctx, inputs: EvalInputs
147
+ ... ) -> EvalResult:
140
148
  ... idea = inputs.first_as(Idea)
141
- ... movie = Movie(title=idea.topic.upper(), runtime=240, synopsis="...")
149
+ ... movie = Movie(
150
+ ... title=idea.topic.upper(), runtime=240, synopsis="..."
151
+ ... )
142
152
  ... tagline = Tagline(line="Don't miss it!")
143
153
  ... return EvalResult.from_objects(
144
- ... movie, tagline,
145
- ... agent=agent,
146
- ... metrics={"confidence": 0.9}
154
+ ... movie, tagline, agent=agent, metrics={"confidence": 0.9}
147
155
  ... )
148
156
  """
149
157
  from flock.artifacts import Artifact
@@ -190,7 +198,9 @@ class EvalResult(BaseModel):
190
198
 
191
199
  Example:
192
200
  >>> class ConditionalProcessor(EngineComponent):
193
- ... async def evaluate(self, agent, ctx, inputs: EvalInputs) -> EvalResult:
201
+ ... async def evaluate(
202
+ ... self, agent, ctx, inputs: EvalInputs
203
+ ... ) -> EvalResult:
194
204
  ... task = inputs.first_as(Task)
195
205
  ... if task.priority < 3:
196
206
  ... return EvalResult.empty() # Skip low priority
@@ -229,12 +239,15 @@ class EvalResult(BaseModel):
229
239
 
230
240
  Example:
231
241
  >>> class ValidationAgent(EngineComponent):
232
- ... async def evaluate(self, agent, ctx, inputs: EvalInputs) -> EvalResult:
242
+ ... async def evaluate(
243
+ ... self, agent, ctx, inputs: EvalInputs
244
+ ... ) -> EvalResult:
233
245
  ... task = inputs.first_as(Task)
234
246
  ... is_valid = task.priority >= 1
235
- ... return EvalResult.with_state(
236
- ... {"validation_passed": is_valid, "validator": "priority_check"}
237
- ... )
247
+ ... return EvalResult.with_state({
248
+ ... "validation_passed": is_valid,
249
+ ... "validator": "priority_check",
250
+ ... })
238
251
  """
239
252
  return cls(
240
253
  artifacts=[],
@@ -245,13 +258,51 @@ class EvalResult(BaseModel):
245
258
 
246
259
 
247
260
  class Context(BaseModel):
248
- board: Any
249
- orchestrator: Any
250
- correlation_id: UUID | None = None # NEW!
261
+ """Runtime context for agent execution.
262
+
263
+ SECURITY FIX (2025-10-17): Simplified to data-only design.
264
+ Context is now just pre-filtered data with ZERO capabilities.
265
+
266
+ Vulnerabilities fixed:
267
+ - Vulnerability #1 (READ): Agents could bypass visibility via ctx.board.list()
268
+ - Vulnerability #2 (WRITE): Agents could bypass validation via ctx.board.publish()
269
+ - Vulnerability #3 (GOD MODE): Agents had unlimited ctx.orchestrator access
270
+ - Vulnerability #4 (STORE ACCESS): Agents could access ctx.store or ctx.provider._store
271
+
272
+ Solution: Orchestrator evaluates context BEFORE creating Context.
273
+ Engines receive only pre-filtered artifact data via ctx.artifacts.
274
+ No provider, no store, no capabilities - just immutable data.
275
+
276
+ Design Philosophy: Engines are pure functions (input + context → output).
277
+ They don't query, they don't mutate - they only transform data.
278
+ """
279
+
280
+ model_config = ConfigDict(frozen=True)
281
+
282
+ # ❌ REMOVED: board: Any (security vulnerability)
283
+ # ❌ REMOVED: orchestrator: Any (security vulnerability)
284
+ # ❌ REMOVED: provider: Any (security vulnerability - engines could call provider methods)
285
+ # ❌ REMOVED: store: Any (security vulnerability - direct store access)
286
+
287
+ # ✅ FINAL SOLUTION: Pre-filtered artifacts (evaluated by orchestrator)
288
+ # Engines can only read this list - they cannot query for more data
289
+ artifacts: list[Artifact] = Field(
290
+ default_factory=list,
291
+ description="Pre-filtered conversation context artifacts (evaluated by orchestrator using context provider)",
292
+ )
293
+
294
+ # ✅ Agent identity (informational only - used by orchestrator for logging/tracing)
295
+ agent_identity: Any = Field(
296
+ default=None,
297
+ description="Agent identity (informational) - engines cannot use this to query data",
298
+ )
299
+
300
+ correlation_id: UUID | None = None
251
301
  task_id: str
252
302
  state: dict[str, Any] = Field(default_factory=dict)
253
303
  is_batch: bool = Field(
254
- default=False, description="True if this execution is processing a BatchSpec accumulation"
304
+ default=False,
305
+ description="True if this execution is processing a BatchSpec accumulation",
255
306
  )
256
307
 
257
308
  def get_variable(self, key: str, default: Any = None) -> Any:
flock/service.py CHANGED
@@ -40,7 +40,9 @@ class BlackboardHTTPService:
40
40
  "visibility": artifact.visibility.model_dump(mode="json"),
41
41
  "visibility_kind": getattr(artifact.visibility, "kind", "Unknown"),
42
42
  "created_at": artifact.created_at.isoformat(),
43
- "correlation_id": str(artifact.correlation_id) if artifact.correlation_id else None,
43
+ "correlation_id": str(artifact.correlation_id)
44
+ if artifact.correlation_id
45
+ else None,
44
46
  "partition_key": artifact.partition_key,
45
47
  "tags": sorted(artifact.tags),
46
48
  "version": artifact.version,
@@ -56,7 +58,9 @@ class BlackboardHTTPService:
56
58
  }
57
59
  for record in consumptions
58
60
  ]
59
- data["consumed_by"] = sorted({record.consumer for record in consumptions})
61
+ data["consumed_by"] = sorted({
62
+ record.consumer for record in consumptions
63
+ })
60
64
  return data
61
65
 
62
66
  def _parse_datetime(value: str | None, label: str) -> datetime | None:
@@ -65,7 +69,9 @@ class BlackboardHTTPService:
65
69
  try:
66
70
  return datetime.fromisoformat(value)
67
71
  except ValueError as exc: # pragma: no cover - FastAPI converts
68
- raise HTTPException(status_code=400, detail=f"Invalid {label}: {value}") from exc
72
+ raise HTTPException(
73
+ status_code=400, detail=f"Invalid {label}: {value}"
74
+ ) from exc
69
75
 
70
76
  def _make_filter_config(
71
77
  type_names: list[str] | None,
@@ -129,7 +135,9 @@ class BlackboardHTTPService:
129
135
  items: list[dict[str, Any]] = []
130
136
  for artifact in artifacts:
131
137
  if isinstance(artifact, ArtifactEnvelope):
132
- items.append(_serialize_artifact(artifact.artifact, artifact.consumptions))
138
+ items.append(
139
+ _serialize_artifact(artifact.artifact, artifact.consumptions)
140
+ )
133
141
  else:
134
142
  items.append(_serialize_artifact(artifact))
135
143
  return {
@@ -179,7 +187,9 @@ class BlackboardHTTPService:
179
187
  type_name = item.get("type")
180
188
  payload = item.get("payload") or {}
181
189
  if not type_name:
182
- raise HTTPException(status_code=400, detail="Each input requires 'type'.")
190
+ raise HTTPException(
191
+ status_code=400, detail="Each input requires 'type'."
192
+ )
183
193
  model = type_registry.resolve(type_name)
184
194
  instance = model(**payload)
185
195
  inputs.append(instance)
@@ -253,7 +263,10 @@ class BlackboardHTTPService:
253
263
 
254
264
  @app.get("/metrics")
255
265
  async def metrics() -> PlainTextResponse:
256
- lines = [f"blackboard_{key} {value}" for key, value in orchestrator.metrics.items()]
266
+ lines = [
267
+ f"blackboard_{key} {value}"
268
+ for key, value in orchestrator.metrics.items()
269
+ ]
257
270
  return PlainTextResponse("\n".join(lines))
258
271
 
259
272
  def run(
flock/store.py CHANGED
@@ -227,7 +227,9 @@ class InMemoryBlackboardStore(BlackboardStore):
227
227
  self._lock = Lock()
228
228
  self._by_id: dict[UUID, Artifact] = {}
229
229
  self._by_type: dict[str, list[Artifact]] = defaultdict(list)
230
- self._consumptions_by_artifact: dict[UUID, list[ConsumptionRecord]] = defaultdict(list)
230
+ self._consumptions_by_artifact: dict[UUID, list[ConsumptionRecord]] = (
231
+ defaultdict(list)
232
+ )
231
233
  self._agent_snapshots: dict[str, AgentSnapshotRecord] = {}
232
234
 
233
235
  async def publish(self, artifact: Artifact) -> None:
@@ -254,7 +256,9 @@ class InMemoryBlackboardStore(BlackboardStore):
254
256
  artifacts = self._by_type.get(canonical, [])
255
257
  return [artifact_type(**artifact.payload) for artifact in artifacts] # type: ignore
256
258
 
257
- async def extend(self, artifacts: Iterable[Artifact]) -> None: # pragma: no cover - helper
259
+ async def extend(
260
+ self, artifacts: Iterable[Artifact]
261
+ ) -> None: # pragma: no cover - helper
258
262
  for artifact in artifacts:
259
263
  await self.publish(artifact)
260
264
 
@@ -280,7 +284,9 @@ class InMemoryBlackboardStore(BlackboardStore):
280
284
  filters = filters or FilterConfig()
281
285
  canonical: set[str] | None = None
282
286
  if filters.type_names:
283
- canonical = {type_registry.resolve_name(name) for name in filters.type_names}
287
+ canonical = {
288
+ type_registry.resolve_name(name) for name in filters.type_names
289
+ }
284
290
 
285
291
  visibility_filter = filters.visibility or set()
286
292
 
@@ -347,7 +353,9 @@ class InMemoryBlackboardStore(BlackboardStore):
347
353
  if not isinstance(artifact, Artifact):
348
354
  raise TypeError("Expected Artifact instance")
349
355
  by_type[artifact.type] = by_type.get(artifact.type, 0) + 1
350
- by_producer[artifact.produced_by] = by_producer.get(artifact.produced_by, 0) + 1
356
+ by_producer[artifact.produced_by] = (
357
+ by_producer.get(artifact.produced_by, 0) + 1
358
+ )
351
359
  kind = getattr(artifact.visibility, "kind", "Unknown")
352
360
  by_visibility[kind] = by_visibility.get(kind, 0) + 1
353
361
  for tag in artifact.tags:
@@ -476,7 +484,9 @@ class SQLiteBlackboardStore(BlackboardStore):
476
484
  "version": artifact.version,
477
485
  "visibility": visibility_json,
478
486
  "tags": tags_json,
479
- "correlation_id": str(artifact.correlation_id) if artifact.correlation_id else None,
487
+ "correlation_id": str(artifact.correlation_id)
488
+ if artifact.correlation_id
489
+ else None,
480
490
  "partition_key": artifact.partition_key,
481
491
  "created_at": created_at,
482
492
  }
@@ -816,7 +826,8 @@ class SQLiteBlackboardStore(BlackboardStore):
816
826
  by_visibility_rows = await cursor.fetchall()
817
827
  await cursor.close()
818
828
  by_visibility = {
819
- (row["visibility_kind"] or "Unknown"): row["count"] for row in by_visibility_rows
829
+ (row["visibility_kind"] or "Unknown"): row["count"]
830
+ for row in by_visibility_rows
820
831
  }
821
832
 
822
833
  tag_query = f"""
@@ -839,7 +850,9 @@ class SQLiteBlackboardStore(BlackboardStore):
839
850
  cursor = await conn.execute(range_query, params_tuple)
840
851
  range_row = await cursor.fetchone()
841
852
  await cursor.close()
842
- earliest = range_row["earliest"] if range_row and range_row["earliest"] else None
853
+ earliest = (
854
+ range_row["earliest"] if range_row and range_row["earliest"] else None
855
+ )
843
856
  latest = range_row["latest"] if range_row and range_row["latest"] else None
844
857
 
845
858
  return {
@@ -902,7 +915,9 @@ class SQLiteBlackboardStore(BlackboardStore):
902
915
  consumption_rows = await cursor.fetchall()
903
916
  await cursor.close()
904
917
 
905
- consumed_by_type = {row["canonical_type"]: row["count"] for row in consumption_rows}
918
+ consumed_by_type = {
919
+ row["canonical_type"]: row["count"] for row in consumption_rows
920
+ }
906
921
  consumed_total = sum(consumed_by_type.values())
907
922
 
908
923
  return {
@@ -1156,7 +1171,9 @@ class SQLiteBlackboardStore(BlackboardStore):
1156
1171
  params: list[Any] = []
1157
1172
 
1158
1173
  if filters.type_names:
1159
- canonical = {type_registry.resolve_name(name) for name in filters.type_names}
1174
+ canonical = {
1175
+ type_registry.resolve_name(name) for name in filters.type_names
1176
+ }
1160
1177
  placeholders = ", ".join("?" for _ in canonical)
1161
1178
  conditions.append(f"{prefix}canonical_type IN ({placeholders})")
1162
1179
  params.extend(sorted(canonical))
@@ -1172,7 +1189,9 @@ class SQLiteBlackboardStore(BlackboardStore):
1172
1189
 
1173
1190
  if filters.visibility:
1174
1191
  placeholders = ", ".join("?" for _ in filters.visibility)
1175
- conditions.append(f"json_extract({prefix}visibility, '$.kind') IN ({placeholders})")
1192
+ conditions.append(
1193
+ f"json_extract({prefix}visibility, '$.kind') IN ({placeholders})"
1194
+ )
1176
1195
  params.extend(sorted(filters.visibility))
1177
1196
 
1178
1197
  if filters.start is not None:
flock/subscription.py CHANGED
@@ -102,7 +102,7 @@ class Subscription:
102
102
  where: Sequence[Predicate] | None = None,
103
103
  text_predicates: Sequence[TextPredicate] | None = None,
104
104
  from_agents: Iterable[str] | None = None,
105
- channels: Iterable[str] | None = None,
105
+ tags: Iterable[str] | None = None,
106
106
  join: JoinSpec | None = None,
107
107
  batch: BatchSpec | None = None,
108
108
  delivery: str = "exclusive",
@@ -116,7 +116,9 @@ class Subscription:
116
116
 
117
117
  # Register all types and build counts (supports duplicates for count-based AND gates)
118
118
  type_name_list = [type_registry.register(t) for t in types]
119
- self.type_names: set[str] = set(type_name_list) # Unique type names (for matching)
119
+ self.type_names: set[str] = set(
120
+ type_name_list
121
+ ) # Unique type names (for matching)
120
122
 
121
123
  # Count-based AND gate: Track how many of each type are required
122
124
  # Example: .consumes(A, A, B) → {"TypeA": 2, "TypeB": 1}
@@ -127,7 +129,7 @@ class Subscription:
127
129
  self.where = list(where or [])
128
130
  self.text_predicates = list(text_predicates or [])
129
131
  self.from_agents = set(from_agents or [])
130
- self.channels = set(channels or [])
132
+ self.tags = set(tags or [])
131
133
  self.join = join
132
134
  self.batch = batch
133
135
  self.delivery = delivery
@@ -145,7 +147,7 @@ class Subscription:
145
147
  return False
146
148
  if self.from_agents and artifact.produced_by not in self.from_agents:
147
149
  return False
148
- if self.channels and not artifact.tags.intersection(self.channels):
150
+ if self.tags and not artifact.tags.intersection(self.tags):
149
151
  return False
150
152
 
151
153
  # Evaluate where predicates on typed payloads
flock/utilities.py CHANGED
@@ -31,7 +31,9 @@ class MetricsUtility(AgentComponent):
31
31
 
32
32
  name: str | None = "metrics"
33
33
 
34
- async def on_pre_evaluate(self, agent, ctx: Context, inputs: EvalInputs) -> EvalInputs:
34
+ async def on_pre_evaluate(
35
+ self, agent, ctx: Context, inputs: EvalInputs
36
+ ) -> EvalInputs:
35
37
  ctx.state.setdefault("metrics", {})[f"{agent.name}:start"] = time.perf_counter()
36
38
  return inputs
37
39
 
@@ -42,7 +44,9 @@ class MetricsUtility(AgentComponent):
42
44
  start = metrics.get(f"{agent.name}:start")
43
45
  if start:
44
46
  metrics[f"{agent.name}:duration_ms"] = (time.perf_counter() - start) * 1000
45
- result.metrics.update({k: v for k, v in metrics.items() if k.endswith("duration_ms")})
47
+ result.metrics.update({
48
+ k: v for k, v in metrics.items() if k.endswith("duration_ms")
49
+ })
46
50
  return result
47
51
 
48
52
 
@@ -78,11 +82,15 @@ class LoggingUtility(AgentComponent):
78
82
 
79
83
  async def on_pre_consume(self, agent, ctx: Context, inputs: list[Any]):
80
84
  summary = ", ".join(self._summarize_artifact(art) for art in inputs) or "<none>"
81
- self._console.log(f"[{agent.name}] consume n={len(inputs)} artifacts -> {summary}")
85
+ self._console.log(
86
+ f"[{agent.name}] consume n={len(inputs)} artifacts -> {summary}"
87
+ )
82
88
  self._render_artifacts(agent.name, inputs, role="input")
83
89
  return await super().on_pre_consume(agent, ctx, inputs)
84
90
 
85
- async def on_pre_evaluate(self, agent, ctx: Context, inputs: EvalInputs) -> EvalInputs:
91
+ async def on_pre_evaluate(
92
+ self, agent, ctx: Context, inputs: EvalInputs
93
+ ) -> EvalInputs:
86
94
  if self._stream_tokens:
87
95
  self._maybe_start_stream(agent, ctx)
88
96
  return await super().on_pre_evaluate(agent, ctx, inputs)
@@ -91,7 +99,9 @@ class LoggingUtility(AgentComponent):
91
99
  self, agent, ctx: Context, inputs: EvalInputs, result: EvalResult
92
100
  ) -> EvalResult:
93
101
  self._render_metrics(agent.name, result.metrics)
94
- self._render_artifacts(agent.name, result.artifacts or inputs.artifacts, role="output")
102
+ self._render_artifacts(
103
+ agent.name, result.artifacts or inputs.artifacts, role="output"
104
+ )
95
105
  if result.logs:
96
106
  self._render_logs(agent.name, result.logs)
97
107
  awaited = await super().on_post_evaluate(agent, ctx, inputs, result)
@@ -102,7 +112,9 @@ class LoggingUtility(AgentComponent):
102
112
  async def on_post_publish(self, agent, ctx: Context, artifact):
103
113
  visibility = getattr(artifact.visibility, "kind", "Public")
104
114
  subtitle = f"visibility={visibility}"
105
- panel = self._build_artifact_panel(artifact, role="published", subtitle=subtitle)
115
+ panel = self._build_artifact_panel(
116
+ artifact, role="published", subtitle=subtitle
117
+ )
106
118
  self._console.print(panel)
107
119
  await super().on_post_publish(agent, ctx, artifact)
108
120
 
@@ -121,7 +133,9 @@ class LoggingUtility(AgentComponent):
121
133
  # ------------------------------------------------------------------
122
134
  # Rendering helpers
123
135
 
124
- def _render_artifacts(self, agent_name: str, artifacts: Sequence[Any], *, role: str) -> None:
136
+ def _render_artifacts(
137
+ self, agent_name: str, artifacts: Sequence[Any], *, role: str
138
+ ) -> None:
125
139
  for artifact in artifacts:
126
140
  panel = self._build_artifact_panel(artifact, role=role)
127
141
  self._console.print(panel)
@@ -189,7 +203,9 @@ class LoggingUtility(AgentComponent):
189
203
  else:
190
204
  textual.append(line)
191
205
  for payload in json_sections:
192
- panel = Panel(payload, title=f"{agent_name} ▸ dspy.output", border_style="green")
206
+ panel = Panel(
207
+ payload, title=f"{agent_name} ▸ dspy.output", border_style="green"
208
+ )
193
209
  self._console.print(panel)
194
210
  if textual:
195
211
  body = Text("\n".join(textual) + "\n")
@@ -244,7 +260,9 @@ class LoggingUtility(AgentComponent):
244
260
  with contextlib.suppress(asyncio.CancelledError):
245
261
  await task
246
262
 
247
- async def _consume_stream(self, agent_name: str, stream_key: str, queue: asyncio.Queue) -> None:
263
+ async def _consume_stream(
264
+ self, agent_name: str, stream_key: str, queue: asyncio.Queue
265
+ ) -> None:
248
266
  body = Text()
249
267
  live: Live | None = None
250
268
  try:
@@ -254,7 +272,9 @@ class LoggingUtility(AgentComponent):
254
272
  break
255
273
  kind = event.get("kind")
256
274
  if live is None:
257
- live_panel = Panel(body, title=f"{agent_name} ▸ streaming", border_style="cyan")
275
+ live_panel = Panel(
276
+ body, title=f"{agent_name} ▸ streaming", border_style="cyan"
277
+ )
258
278
  live = Live(
259
279
  live_panel,
260
280
  console=self._console,
@@ -274,19 +294,27 @@ class LoggingUtility(AgentComponent):
274
294
  message = event.get("message") or ""
275
295
  body.append(f"\n⚠ {message}\n", style="bold red")
276
296
  if live is not None:
277
- live.update(Panel(body, title=f"{agent_name} ▸ streaming", border_style="cyan"))
297
+ live.update(
298
+ Panel(
299
+ body, title=f"{agent_name} ▸ streaming", border_style="cyan"
300
+ )
301
+ )
278
302
  finally:
279
303
  if live is not None:
280
304
  live.__exit__(None, None, None)
281
305
  if body.plain:
282
306
  self._console.print(
283
- Panel(body, title=f"{agent_name} ▸ stream transcript", border_style="cyan")
307
+ Panel(
308
+ body, title=f"{agent_name} ▸ stream transcript", border_style="cyan"
309
+ )
284
310
  )
285
311
 
286
312
  def _stream_key(self, agent, ctx: Context) -> str:
287
313
  return f"{ctx.task_id}:{agent.name}"
288
314
 
289
- def _attach_stream_queue(self, state: MutableMapping[str, Any], queue: asyncio.Queue) -> None:
315
+ def _attach_stream_queue(
316
+ self, state: MutableMapping[str, Any], queue: asyncio.Queue
317
+ ) -> None:
290
318
  state.setdefault("_logging", {})["stream_queue"] = queue
291
319
 
292
320
  def _detach_stream_queue(self, state: MutableMapping[str, Any]) -> None:
@@ -28,8 +28,12 @@ class OutputUtilityConfig(AgentComponentConfig):
28
28
  theme: OutputTheme = Field(
29
29
  default=OutputTheme.catppuccin_mocha, description="Theme for output formatting"
30
30
  )
31
- render_table: bool = Field(default=True, description="Whether to render output as a table")
32
- max_length: int = Field(default=1000, description="Maximum length for displayed output")
31
+ render_table: bool = Field(
32
+ default=True, description="Whether to render output as a table"
33
+ )
34
+ max_length: int = Field(
35
+ default=1000, description="Maximum length for displayed output"
36
+ )
33
37
  truncate_long_values: bool = Field(
34
38
  default=True, description="Whether to truncate long values in display"
35
39
  )
@@ -61,7 +65,9 @@ class OutputUtilityComponent(AgentComponent):
61
65
  default_factory=OutputUtilityConfig, description="Output configuration"
62
66
  )
63
67
 
64
- def __init__(self, name: str = "output", config: OutputUtilityConfig | None = None, **data):
68
+ def __init__(
69
+ self, name: str = "output", config: OutputUtilityConfig | None = None, **data
70
+ ):
65
71
  if config is None:
66
72
  config = OutputUtilityConfig()
67
73
  super().__init__(name=name, config=config, **data)
@@ -96,7 +102,11 @@ class OutputUtilityComponent(AgentComponent):
96
102
  items = []
97
103
  prefix = " " * indent
98
104
  for key, value in d.items():
99
- if self.config.truncate_long_values and isinstance(value, str) and len(value) > 100:
105
+ if (
106
+ self.config.truncate_long_values
107
+ and isinstance(value, str)
108
+ and len(value) > 100
109
+ ):
100
110
  value = value[:97] + "..."
101
111
  formatted_value = self._format_value(value, key)
102
112
  items.append(f"{prefix} {key}: {formatted_value}")
@@ -125,7 +135,9 @@ class OutputUtilityComponent(AgentComponent):
125
135
  return f"[CODE:{language}]\n{code}\n[/CODE]"
126
136
 
127
137
  # Replace markdown-style code blocks
128
- return re.sub(r"```(\w+)?\n(.*?)\n```", replace_code_block, text, flags=re.DOTALL)
138
+ return re.sub(
139
+ r"```(\w+)?\n(.*?)\n```", replace_code_block, text, flags=re.DOTALL
140
+ )
129
141
 
130
142
  async def on_post_evaluate(
131
143
  self, agent: "Agent", ctx: Context, inputs: EvalInputs, result: EvalResult
@@ -138,7 +150,9 @@ class OutputUtilityComponent(AgentComponent):
138
150
  streamed_artifact_id = None
139
151
 
140
152
  if ctx:
141
- streaming_live_handled = bool(ctx.get_variable("_flock_stream_live_active", False))
153
+ streaming_live_handled = bool(
154
+ ctx.get_variable("_flock_stream_live_active", False)
155
+ )
142
156
  output_queued = bool(ctx.get_variable("_flock_output_queued", False))
143
157
  streamed_artifact_id = ctx.get_variable("_flock_streamed_artifact_id")
144
158
 
@@ -162,20 +176,24 @@ class OutputUtilityComponent(AgentComponent):
162
176
 
163
177
  # Skip output if streaming already handled it (and no ID to update)
164
178
  if streaming_live_handled:
165
- logger.debug("Skipping static table because streaming rendered live output.")
179
+ logger.debug(
180
+ "Skipping static table because streaming rendered live output."
181
+ )
166
182
  return result
167
183
 
168
184
  # If output was queued due to concurrent stream, wait and then display
169
185
  if output_queued:
170
186
  # Wait for active streams to complete
171
- orchestrator = getattr(ctx, "orchestrator", None)
172
- if orchestrator:
187
+ # Phase 6+7 Security Fix: Use Agent class variable instead of ctx.state
188
+ if ctx:
173
189
  import asyncio
174
190
 
191
+ from flock.agent import Agent
192
+
175
193
  # Wait until no streams are active
176
194
  max_wait = 30 # seconds
177
195
  waited = 0
178
- while getattr(orchestrator, "_active_streams", 0) > 0 and waited < max_wait:
196
+ while Agent._streaming_counter > 0 and waited < max_wait:
179
197
  await asyncio.sleep(0.1)
180
198
  waited += 0.1
181
199
  logger.debug(
@@ -189,7 +207,9 @@ class OutputUtilityComponent(AgentComponent):
189
207
  try:
190
208
  # Create a copy or select relevant parts to avoid modifying original result dict directly
191
209
  display_result = result.copy()
192
- display_result["context_snapshot"] = ctx.to_dict() # Potential performance hit
210
+ display_result["context_snapshot"] = (
211
+ ctx.to_dict()
212
+ ) # Potential performance hit
193
213
  except Exception:
194
214
  display_result = result.copy()
195
215
  display_result["context_snapshot"] = "[Error serializing context]"