flock-core 0.5.9__py3-none-any.whl → 0.5.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of flock-core has been flagged as potentially problematic; review the file changes below for details.

Files changed (54)
  1. flock/agent.py +149 -62
  2. flock/api/themes.py +6 -2
  3. flock/api_models.py +285 -0
  4. flock/artifact_collector.py +6 -3
  5. flock/batch_accumulator.py +3 -1
  6. flock/cli.py +3 -1
  7. flock/components.py +45 -56
  8. flock/context_provider.py +531 -0
  9. flock/correlation_engine.py +8 -4
  10. flock/dashboard/collector.py +48 -29
  11. flock/dashboard/events.py +10 -4
  12. flock/dashboard/launcher.py +3 -1
  13. flock/dashboard/models/graph.py +9 -3
  14. flock/dashboard/service.py +187 -93
  15. flock/dashboard/websocket.py +17 -4
  16. flock/engines/dspy_engine.py +174 -98
  17. flock/engines/examples/simple_batch_engine.py +9 -3
  18. flock/examples.py +6 -2
  19. flock/frontend/src/services/indexeddb.test.ts +4 -4
  20. flock/frontend/src/services/indexeddb.ts +1 -1
  21. flock/helper/cli_helper.py +14 -1
  22. flock/logging/auto_trace.py +6 -1
  23. flock/logging/formatters/enum_builder.py +3 -1
  24. flock/logging/formatters/theme_builder.py +32 -17
  25. flock/logging/formatters/themed_formatter.py +38 -22
  26. flock/logging/logging.py +21 -7
  27. flock/logging/telemetry.py +9 -3
  28. flock/logging/telemetry_exporter/duckdb_exporter.py +27 -25
  29. flock/logging/trace_and_logged.py +14 -5
  30. flock/mcp/__init__.py +3 -6
  31. flock/mcp/client.py +49 -19
  32. flock/mcp/config.py +12 -6
  33. flock/mcp/manager.py +6 -2
  34. flock/mcp/servers/sse/flock_sse_server.py +9 -3
  35. flock/mcp/servers/streamable_http/flock_streamable_http_server.py +6 -2
  36. flock/mcp/tool.py +18 -6
  37. flock/mcp/types/handlers.py +3 -1
  38. flock/mcp/types/types.py +9 -3
  39. flock/orchestrator.py +449 -58
  40. flock/orchestrator_component.py +15 -5
  41. flock/patches/dspy_streaming_patch.py +12 -4
  42. flock/registry.py +9 -3
  43. flock/runtime.py +69 -18
  44. flock/service.py +135 -64
  45. flock/store.py +29 -10
  46. flock/subscription.py +6 -4
  47. flock/system_artifacts.py +33 -0
  48. flock/utilities.py +41 -13
  49. flock/utility/output_utility_component.py +31 -11
  50. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/METADATA +150 -26
  51. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/RECORD +54 -51
  52. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/WHEEL +0 -0
  53. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/entry_points.txt +0 -0
  54. {flock_core-0.5.9.dist-info → flock_core-0.5.11.dist-info}/licenses/LICENSE +0 -0
flock/agent.py CHANGED
@@ -47,12 +47,14 @@ class MCPServerConfig(TypedDict, total=False):
47
47
  >>> config: MCPServerConfig = {"roots": ["/workspace/data"]}
48
48
 
49
49
  >>> # Tool whitelist only
50
- >>> config: MCPServerConfig = {"tool_whitelist": ["read_file", "write_file"]}
50
+ >>> config: MCPServerConfig = {
51
+ ... "tool_whitelist": ["read_file", "write_file"]
52
+ ... }
51
53
 
52
54
  >>> # Both restrictions
53
55
  >>> config: MCPServerConfig = {
54
56
  ... "roots": ["/workspace/data"],
55
- ... "tool_whitelist": ["read_file"]
57
+ ... "tool_whitelist": ["read_file"],
56
58
  ... }
57
59
  """
58
60
 
@@ -66,9 +68,9 @@ class AgentOutput:
66
68
  default_visibility: Visibility
67
69
  count: int = 1 # Number of artifacts to generate (fan-out)
68
70
  filter_predicate: Callable[[BaseModel], bool] | None = None # Where clause
69
- validate_predicate: Callable[[BaseModel], bool] | list[tuple[Callable, str]] | None = (
70
- None # Validation logic
71
- )
71
+ validate_predicate: (
72
+ Callable[[BaseModel], bool] | list[tuple[Callable, str]] | None
73
+ ) = None # Validation logic
72
74
  group_description: str | None = None # Group description override
73
75
 
74
76
  def __post_init__(self):
@@ -127,6 +129,13 @@ class Agent(metaclass=AutoTracedMeta):
127
129
  All public methods are automatically traced via OpenTelemetry.
128
130
  """
129
131
 
132
+ # Phase 6+7: Class-level streaming coordination (SHARED across ALL agent instances)
133
+ # These class variables enable all agents to coordinate CLI streaming behavior
134
+ _streaming_counter: int = 0 # Global count of agents currently streaming to CLI
135
+ _websocket_broadcast_global: Any = (
136
+ None # WebSocket broadcast wrapper (dashboard mode)
137
+ )
138
+
130
139
  def __init__(self, name: str, *, orchestrator: Flock) -> None:
131
140
  self.name = name
132
141
  self.description: str | None = None
@@ -145,10 +154,16 @@ class Agent(metaclass=AutoTracedMeta):
145
154
  self.tenant_id: str | None = None
146
155
  self.model: str | None = None
147
156
  self.prevent_self_trigger: bool = True # T065: Prevent infinite feedback loops
157
+ # Phase 3: Per-agent context provider (security fix)
158
+ self.context_provider: Any = None
148
159
  # MCP integration
149
160
  self.mcp_server_names: set[str] = set()
150
- self.mcp_mount_points: list[str] = [] # Deprecated: Use mcp_server_mounts instead
151
- self.mcp_server_mounts: dict[str, list[str]] = {} # Server-specific mount points
161
+ self.mcp_mount_points: list[
162
+ str
163
+ ] = [] # Deprecated: Use mcp_server_mounts instead
164
+ self.mcp_server_mounts: dict[
165
+ str, list[str]
166
+ ] = {} # Server-specific mount points
152
167
  self.tool_whitelist: list[str] | None = None
153
168
 
154
169
  @property
@@ -158,7 +173,9 @@ class Agent(metaclass=AutoTracedMeta):
158
173
 
159
174
  @property
160
175
  def identity(self) -> AgentIdentity:
161
- return AgentIdentity(name=self.name, labels=self.labels, tenant_id=self.tenant_id)
176
+ return AgentIdentity(
177
+ name=self.name, labels=self.labels, tenant_id=self.tenant_id
178
+ )
162
179
 
163
180
  @staticmethod
164
181
  def _component_display_name(component: AgentComponent) -> str:
@@ -199,7 +216,9 @@ class Agent(metaclass=AutoTracedMeta):
199
216
  self._resolve_utilities()
200
217
  await self._run_initialize(ctx)
201
218
  processed_inputs = await self._run_pre_consume(ctx, artifacts)
202
- eval_inputs = EvalInputs(artifacts=processed_inputs, state=dict(ctx.state))
219
+ eval_inputs = EvalInputs(
220
+ artifacts=processed_inputs, state=dict(ctx.state)
221
+ )
203
222
  eval_inputs = await self._run_pre_evaluate(ctx, eval_inputs)
204
223
 
205
224
  # Phase 3: Call engine ONCE PER OutputGroup
@@ -218,13 +237,19 @@ class Agent(metaclass=AutoTracedMeta):
218
237
  # Loop over each output group
219
238
  for group_idx, output_group in enumerate(self.output_groups):
220
239
  # Prepare group-specific context
221
- group_ctx = self._prepare_group_context(ctx, group_idx, output_group)
240
+ group_ctx = self._prepare_group_context(
241
+ ctx, group_idx, output_group
242
+ )
222
243
 
223
244
  # Phase 7: Single evaluation path with auto-detection
224
245
  # Engine's evaluate() auto-detects batch/fan-out from ctx and output_group
225
- result = await self._run_engines(group_ctx, eval_inputs, output_group)
246
+ result = await self._run_engines(
247
+ group_ctx, eval_inputs, output_group
248
+ )
226
249
 
227
- result = await self._run_post_evaluate(group_ctx, eval_inputs, result)
250
+ result = await self._run_post_evaluate(
251
+ group_ctx, eval_inputs, result
252
+ )
228
253
 
229
254
  # Extract outputs for THIS group only
230
255
  group_outputs = await self._make_outputs_for_group(
@@ -290,7 +315,10 @@ class Agent(metaclass=AutoTracedMeta):
290
315
  for tool_key, tool_entry in tools_dict.items():
291
316
  if isinstance(tool_entry, dict):
292
317
  original_name = tool_entry.get("original_name", None)
293
- if original_name is not None and original_name in tool_whitelist:
318
+ if (
319
+ original_name is not None
320
+ and original_name in tool_whitelist
321
+ ):
294
322
  filtered_tools[tool_key] = tool_entry
295
323
 
296
324
  tools_dict = filtered_tools
@@ -315,7 +343,9 @@ class Agent(metaclass=AutoTracedMeta):
315
343
  except Exception as e:
316
344
  # Architecture Decision: AD007 - Graceful Degradation
317
345
  # Agent continues with native tools only
318
- logger.error(f"Failed to load MCP tools for agent {self.name}: {e}", exc_info=True)
346
+ logger.error(
347
+ f"Failed to load MCP tools for agent {self.name}: {e}", exc_info=True
348
+ )
319
349
  return []
320
350
 
321
351
  async def _run_initialize(self, ctx: Context) -> None:
@@ -336,7 +366,9 @@ class Agent(metaclass=AutoTracedMeta):
336
366
  for engine in self.engines:
337
367
  await engine.on_initialize(self, ctx)
338
368
 
339
- async def _run_pre_consume(self, ctx: Context, inputs: list[Artifact]) -> list[Artifact]:
369
+ async def _run_pre_consume(
370
+ self, ctx: Context, inputs: list[Artifact]
371
+ ) -> list[Artifact]:
340
372
  current = inputs
341
373
  for component in self._sorted_utilities():
342
374
  comp_name = self._component_display_name(component)
@@ -408,7 +440,13 @@ class Agent(metaclass=AutoTracedMeta):
408
440
  if isinstance(result, BaseModel) and not isinstance(result, ER):
409
441
  result = ER.from_object(result, agent=self)
410
442
 
411
- result = await engine.on_post_evaluate(self, ctx, current_inputs, result)
443
+ artifacts = result.artifacts
444
+ for artifact in artifacts:
445
+ artifact.correlation_id = ctx.correlation_id
446
+
447
+ result = await engine.on_post_evaluate(
448
+ self, ctx, current_inputs, result
449
+ )
412
450
  accumulated_logs.extend(result.logs)
413
451
  accumulated_metrics.update(result.metrics)
414
452
  merged_state = dict(current_inputs.state)
@@ -484,9 +522,12 @@ class Agent(metaclass=AutoTracedMeta):
484
522
  if matching_artifact:
485
523
  metadata["artifact_id"] = matching_artifact.id
486
524
 
487
- artifact = output_decl.apply(payload, produced_by=self.name, metadata=metadata)
525
+ artifact = output_decl.apply(
526
+ payload, produced_by=self.name, metadata=metadata
527
+ )
488
528
  produced.append(artifact)
489
- await ctx.board.publish(artifact)
529
+ # Phase 6: REMOVED publishing - orchestrator now handles it
530
+ # await ctx.board.publish(artifact)
490
531
 
491
532
  return produced
492
533
 
@@ -604,7 +645,9 @@ class Agent(metaclass=AutoTracedMeta):
604
645
  model_instance = model_cls(**artifact.payload)
605
646
  for check, error_msg in output_decl.validate_predicate:
606
647
  if not check(model_instance):
607
- raise ValueError(f"{error_msg}: {output_decl.spec.type_name}")
648
+ raise ValueError(
649
+ f"{error_msg}: {output_decl.spec.type_name}"
650
+ )
608
651
 
609
652
  # 5. Apply visibility and publish artifacts (Phase 5)
610
653
  for artifact_from_engine in matching_artifacts:
@@ -626,14 +669,20 @@ class Agent(metaclass=AutoTracedMeta):
626
669
 
627
670
  # Re-wrap the artifact with agent metadata
628
671
  artifact = output_decl.apply(
629
- artifact_from_engine.payload, produced_by=self.name, metadata=metadata
672
+ artifact_from_engine.payload,
673
+ produced_by=self.name,
674
+ metadata=metadata,
630
675
  )
631
676
  produced.append(artifact)
632
- await ctx.board.publish(artifact)
677
+ # Phase 6 SECURITY FIX: REMOVED publishing - orchestrator now handles it
678
+ # This fixes Vulnerability #2 (WRITE Bypass) - agents can no longer publish directly
679
+ # await ctx.board.publish(artifact)
633
680
 
634
681
  return produced
635
682
 
636
- async def _run_post_publish(self, ctx: Context, artifacts: Sequence[Artifact]) -> None:
683
+ async def _run_post_publish(
684
+ self, ctx: Context, artifacts: Sequence[Artifact]
685
+ ) -> None:
637
686
  components = self._sorted_utilities()
638
687
  for artifact in artifacts:
639
688
  for component in components:
@@ -718,7 +767,8 @@ class Agent(metaclass=AutoTracedMeta):
718
767
  return []
719
768
 
720
769
  default_engine = DSPyEngine(
721
- model=self._orchestrator.model or os.getenv("DEFAULT_MODEL", "openai/gpt-4.1"),
770
+ model=self._orchestrator.model
771
+ or os.getenv("DEFAULT_MODEL", "openai/gpt-4.1"),
722
772
  instructions=self.description,
723
773
  )
724
774
  self.engines = [default_engine]
@@ -830,11 +880,13 @@ class AgentBuilder:
830
880
  def consumes(
831
881
  self,
832
882
  *types: type[BaseModel],
833
- where: Callable[[BaseModel], bool] | Sequence[Callable[[BaseModel], bool]] | None = None,
883
+ where: Callable[[BaseModel], bool]
884
+ | Sequence[Callable[[BaseModel], bool]]
885
+ | None = None,
834
886
  text: str | None = None,
835
887
  min_p: float = 0.0,
836
888
  from_agents: Iterable[str] | None = None,
837
- channels: Iterable[str] | None = None,
889
+ tags: Iterable[str] | None = None,
838
890
  join: dict | JoinSpec | None = None,
839
891
  batch: dict | BatchSpec | None = None,
840
892
  delivery: str = "exclusive",
@@ -853,7 +905,7 @@ class AgentBuilder:
853
905
  text: Optional semantic text filter using embedding similarity
854
906
  min_p: Minimum probability threshold for text similarity (0.0-1.0)
855
907
  from_agents: Only consume artifacts from specific agents
856
- channels: Only consume artifacts with matching tags
908
+ tags: Only consume artifacts with matching tags
857
909
  join: Join specification for coordinating multiple artifact types
858
910
  batch: Batch specification for processing multiple artifacts together
859
911
  delivery: Delivery mode - "exclusive" (one agent) or "broadcast" (all matching)
@@ -876,23 +928,17 @@ class AgentBuilder:
876
928
  >>> # Multiple predicates (all must pass)
877
929
  >>> agent.consumes(
878
930
  ... Order,
879
- ... where=[
880
- ... lambda o: o.total > 100,
881
- ... lambda o: o.status == "pending"
882
- ... ]
931
+ ... where=[lambda o: o.total > 100, lambda o: o.status == "pending"],
883
932
  ... )
884
933
 
885
934
  >>> # Consume from specific agents
886
935
  >>> agent.consumes(Report, from_agents=["analyzer", "validator"])
887
936
 
888
937
  >>> # Channel-based routing
889
- >>> agent.consumes(Alert, channels={"critical", "security"})
938
+ >>> agent.consumes(Alert, tags={"critical", "security"})
890
939
 
891
940
  >>> # Batch processing
892
- >>> agent.consumes(
893
- ... Email,
894
- ... batch={"size": 10, "timeout": 5.0}
895
- ... )
941
+ >>> agent.consumes(Email, batch={"size": 10, "timeout": 5.0})
896
942
  """
897
943
  predicates: Sequence[Callable[[BaseModel], bool]] | None
898
944
  if where is None:
@@ -911,7 +957,7 @@ class AgentBuilder:
911
957
  where=predicates,
912
958
  text_predicates=text_predicates,
913
959
  from_agents=from_agents,
914
- channels=channels,
960
+ tags=tags,
915
961
  join=join_spec,
916
962
  batch=batch_spec,
917
963
  delivery=delivery,
@@ -927,7 +973,9 @@ class AgentBuilder:
927
973
  visibility: Visibility | Callable[[BaseModel], Visibility] | None = None,
928
974
  fan_out: int | None = None,
929
975
  where: Callable[[BaseModel], bool] | None = None,
930
- validate: Callable[[BaseModel], bool] | list[tuple[Callable, str]] | None = None,
976
+ validate: Callable[[BaseModel], bool]
977
+ | list[tuple[Callable, str]]
978
+ | None = None,
931
979
  description: str | None = None,
932
980
  ) -> PublishBuilder:
933
981
  """Declare which artifact types this agent produces.
@@ -945,11 +993,17 @@ class AgentBuilder:
945
993
 
946
994
  Examples:
947
995
  >>> agent.publishes(Report) # Publish 1 Report
948
- >>> agent.publishes(Task, Task, Task) # Publish 3 Tasks (duplicate counting)
996
+ >>> agent.publishes(
997
+ ... Task, Task, Task
998
+ ... ) # Publish 3 Tasks (duplicate counting)
949
999
  >>> agent.publishes(Task, fan_out=3) # Same as above (sugar syntax)
950
1000
  >>> agent.publishes(Task, where=lambda t: t.priority > 5) # With filtering
951
- >>> agent.publishes(Report, validate=lambda r: r.score > 0) # With validation
952
- >>> agent.publishes(Task, description="Special instructions") # With description
1001
+ >>> agent.publishes(
1002
+ ... Report, validate=lambda r: r.score > 0
1003
+ ... ) # With validation
1004
+ >>> agent.publishes(
1005
+ ... Task, description="Special instructions"
1006
+ ... ) # With description
953
1007
 
954
1008
  See Also:
955
1009
  - PublicVisibility: Default, visible to all agents
@@ -1000,7 +1054,9 @@ class AgentBuilder:
1000
1054
  # Create OutputGroup from outputs
1001
1055
  group = OutputGroup(
1002
1056
  outputs=outputs,
1003
- shared_visibility=resolved_visibility if not callable(resolved_visibility) else None,
1057
+ shared_visibility=resolved_visibility
1058
+ if not callable(resolved_visibility)
1059
+ else None,
1004
1060
  group_description=description,
1005
1061
  )
1006
1062
 
@@ -1027,20 +1083,14 @@ class AgentBuilder:
1027
1083
 
1028
1084
  Examples:
1029
1085
  >>> # Rate limiting
1030
- >>> agent.with_utilities(
1031
- ... RateLimiter(max_calls=10, window=60)
1032
- ... )
1086
+ >>> agent.with_utilities(RateLimiter(max_calls=10, window=60))
1033
1087
 
1034
1088
  >>> # Budget control
1035
- >>> agent.with_utilities(
1036
- ... TokenBudget(max_tokens=10000)
1037
- ... )
1089
+ >>> agent.with_utilities(TokenBudget(max_tokens=10000))
1038
1090
 
1039
1091
  >>> # Multiple components (executed in order)
1040
1092
  >>> agent.with_utilities(
1041
- ... RateLimiter(max_calls=5),
1042
- ... MetricsCollector(),
1043
- ... CacheLayer(ttl=3600)
1093
+ ... RateLimiter(max_calls=5), MetricsCollector(), CacheLayer(ttl=3600)
1044
1094
  ... )
1045
1095
 
1046
1096
  See Also:
@@ -1066,19 +1116,14 @@ class AgentBuilder:
1066
1116
 
1067
1117
  Examples:
1068
1118
  >>> # DSPy engine with specific model
1069
- >>> agent.with_engines(
1070
- ... DSPyEngine(model="openai/gpt-4o")
1071
- ... )
1119
+ >>> agent.with_engines(DSPyEngine(model="openai/gpt-4o"))
1072
1120
 
1073
1121
  >>> # Custom non-LLM engine
1074
- >>> agent.with_engines(
1075
- ... RuleBasedEngine(rules=my_rules)
1076
- ... )
1122
+ >>> agent.with_engines(RuleBasedEngine(rules=my_rules))
1077
1123
 
1078
1124
  >>> # Hybrid approach (multiple engines)
1079
1125
  >>> agent.with_engines(
1080
- ... DSPyEngine(model="openai/gpt-4o-mini"),
1081
- ... FallbackEngine()
1126
+ ... DSPyEngine(model="openai/gpt-4o-mini"), FallbackEngine()
1082
1127
  ... )
1083
1128
 
1084
1129
  Note:
@@ -1113,6 +1158,39 @@ class AgentBuilder:
1113
1158
  self._agent.tools.update(funcs)
1114
1159
  return self
1115
1160
 
1161
+ def with_context(self, provider: Any) -> AgentBuilder:
1162
+ """Configure a custom context provider for this agent (Phase 3 security fix).
1163
+
1164
+ Context providers control what artifacts an agent can see, enforcing
1165
+ visibility filtering at the security boundary layer.
1166
+
1167
+ Args:
1168
+ provider: ContextProvider instance for this agent
1169
+
1170
+ Returns:
1171
+ self for method chaining
1172
+
1173
+ Examples:
1174
+ >>> # Use custom provider for this agent
1175
+ >>> agent.with_context(MyCustomProvider())
1176
+
1177
+ >>> # Use FilteredContextProvider for declarative filtering
1178
+ >>> agent.with_context(
1179
+ ... FilteredContextProvider(FilterConfig(tags={"important"}))
1180
+ ... )
1181
+
1182
+ Note:
1183
+ Per-agent provider takes precedence over global provider configured
1184
+ on Flock(context_provider=...). If neither is set, DefaultContextProvider
1185
+ is used automatically.
1186
+
1187
+ See Also:
1188
+ - DefaultContextProvider: Default security boundary with visibility enforcement
1189
+ - FilteredContextProvider: Declarative filtering with FilterConfig
1190
+ """
1191
+ self._agent.context_provider = provider
1192
+ return self
1193
+
1116
1194
  def with_mcps(
1117
1195
  self,
1118
1196
  servers: (
@@ -1144,8 +1222,11 @@ class AgentBuilder:
1144
1222
 
1145
1223
  >>> # New format: Server-specific config with roots and tool whitelist
1146
1224
  >>> agent.with_mcps({
1147
- ... "filesystem": {"roots": ["/workspace/dir/data"], "tool_whitelist": ["read_file"]},
1148
- ... "github": {} # No restrictions for github
1225
+ ... "filesystem": {
1226
+ ... "roots": ["/workspace/dir/data"],
1227
+ ... "tool_whitelist": ["read_file"],
1228
+ ... },
1229
+ ... "github": {}, # No restrictions for github
1149
1230
  ... })
1150
1231
 
1151
1232
  >>> # Old format: Direct list (backward compatible)
@@ -1180,7 +1261,11 @@ class AgentBuilder:
1180
1261
  elif isinstance(server_config, dict):
1181
1262
  # New format: MCPServerConfig with optional roots and tool_whitelist
1182
1263
  mounts = server_config.get("roots", None)
1183
- if mounts is not None and isinstance(mounts, list) and len(mounts) > 0:
1264
+ if (
1265
+ mounts is not None
1266
+ and isinstance(mounts, list)
1267
+ and len(mounts) > 0
1268
+ ):
1184
1269
  server_mounts[server_name] = list(mounts)
1185
1270
 
1186
1271
  config_whitelist = server_config.get("tool_whitelist", None)
@@ -1341,7 +1426,9 @@ class AgentBuilder:
1341
1426
 
1342
1427
  # Get types agent publishes
1343
1428
  publishing_types = {
1344
- output.spec.type_name for group in self._agent.output_groups for output in group.outputs
1429
+ output.spec.type_name
1430
+ for group in self._agent.output_groups
1431
+ for output in group.outputs
1345
1432
  }
1346
1433
 
1347
1434
  # Check for overlap
flock/api/themes.py CHANGED
@@ -59,7 +59,9 @@ async def get_theme(theme_name: str) -> dict[str, Any]:
59
59
  theme_path = THEMES_DIR / f"{theme_name}.toml"
60
60
 
61
61
  if not theme_path.exists():
62
- raise HTTPException(status_code=404, detail=f"Theme '{theme_name}' not found")
62
+ raise HTTPException(
63
+ status_code=404, detail=f"Theme '{theme_name}' not found"
64
+ )
63
65
 
64
66
  # Load TOML theme
65
67
  theme_data = toml.load(theme_path)
@@ -68,4 +70,6 @@ async def get_theme(theme_name: str) -> dict[str, Any]:
68
70
  except HTTPException:
69
71
  raise
70
72
  except Exception as e:
71
- raise HTTPException(status_code=500, detail=f"Failed to load theme '{theme_name}': {e!s}")
73
+ raise HTTPException(
74
+ status_code=500, detail=f"Failed to load theme '{theme_name}': {e!s}"
75
+ )