flock-core 0.5.11__py3-none-any.whl → 0.5.20__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of flock-core might be problematic.
- flock/__init__.py +1 -1
- flock/agent/__init__.py +30 -0
- flock/agent/builder_helpers.py +192 -0
- flock/agent/builder_validator.py +169 -0
- flock/agent/component_lifecycle.py +325 -0
- flock/agent/context_resolver.py +141 -0
- flock/agent/mcp_integration.py +212 -0
- flock/agent/output_processor.py +304 -0
- flock/api/__init__.py +20 -0
- flock/{api_models.py → api/models.py} +0 -2
- flock/{service.py → api/service.py} +3 -3
- flock/cli.py +2 -2
- flock/components/__init__.py +41 -0
- flock/components/agent/__init__.py +22 -0
- flock/{components.py → components/agent/base.py} +4 -3
- flock/{utility/output_utility_component.py → components/agent/output_utility.py} +12 -7
- flock/components/orchestrator/__init__.py +22 -0
- flock/{orchestrator_component.py → components/orchestrator/base.py} +5 -293
- flock/components/orchestrator/circuit_breaker.py +95 -0
- flock/components/orchestrator/collection.py +143 -0
- flock/components/orchestrator/deduplication.py +78 -0
- flock/core/__init__.py +30 -0
- flock/core/agent.py +953 -0
- flock/{artifacts.py → core/artifacts.py} +1 -1
- flock/{context_provider.py → core/context_provider.py} +3 -3
- flock/core/orchestrator.py +1102 -0
- flock/{store.py → core/store.py} +99 -454
- flock/{subscription.py → core/subscription.py} +1 -1
- flock/dashboard/collector.py +5 -5
- flock/dashboard/graph_builder.py +7 -7
- flock/dashboard/routes/__init__.py +21 -0
- flock/dashboard/routes/control.py +327 -0
- flock/dashboard/routes/helpers.py +340 -0
- flock/dashboard/routes/themes.py +76 -0
- flock/dashboard/routes/traces.py +521 -0
- flock/dashboard/routes/websocket.py +108 -0
- flock/dashboard/service.py +43 -1316
- flock/engines/dspy/__init__.py +20 -0
- flock/engines/dspy/artifact_materializer.py +216 -0
- flock/engines/dspy/signature_builder.py +474 -0
- flock/engines/dspy/streaming_executor.py +858 -0
- flock/engines/dspy_engine.py +45 -1330
- flock/engines/examples/simple_batch_engine.py +2 -2
- flock/examples.py +7 -7
- flock/logging/logging.py +1 -16
- flock/models/__init__.py +10 -0
- flock/orchestrator/__init__.py +45 -0
- flock/{artifact_collector.py → orchestrator/artifact_collector.py} +3 -3
- flock/orchestrator/artifact_manager.py +168 -0
- flock/{batch_accumulator.py → orchestrator/batch_accumulator.py} +2 -2
- flock/orchestrator/component_runner.py +389 -0
- flock/orchestrator/context_builder.py +167 -0
- flock/{correlation_engine.py → orchestrator/correlation_engine.py} +2 -2
- flock/orchestrator/event_emitter.py +167 -0
- flock/orchestrator/initialization.py +184 -0
- flock/orchestrator/lifecycle_manager.py +226 -0
- flock/orchestrator/mcp_manager.py +202 -0
- flock/orchestrator/scheduler.py +189 -0
- flock/orchestrator/server_manager.py +234 -0
- flock/orchestrator/tracing.py +147 -0
- flock/storage/__init__.py +10 -0
- flock/storage/artifact_aggregator.py +158 -0
- flock/storage/in_memory/__init__.py +6 -0
- flock/storage/in_memory/artifact_filter.py +114 -0
- flock/storage/in_memory/history_aggregator.py +115 -0
- flock/storage/sqlite/__init__.py +10 -0
- flock/storage/sqlite/agent_history_queries.py +154 -0
- flock/storage/sqlite/consumption_loader.py +100 -0
- flock/storage/sqlite/query_builder.py +112 -0
- flock/storage/sqlite/query_params_builder.py +91 -0
- flock/storage/sqlite/schema_manager.py +168 -0
- flock/storage/sqlite/summary_queries.py +194 -0
- flock/utils/__init__.py +14 -0
- flock/utils/async_utils.py +67 -0
- flock/{runtime.py → utils/runtime.py} +3 -3
- flock/utils/time_utils.py +53 -0
- flock/utils/type_resolution.py +38 -0
- flock/{utilities.py → utils/utilities.py} +2 -2
- flock/utils/validation.py +57 -0
- flock/utils/visibility.py +79 -0
- flock/utils/visibility_utils.py +134 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/METADATA +18 -4
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/RECORD +89 -33
- flock/agent.py +0 -1578
- flock/orchestrator.py +0 -1983
- /flock/{visibility.py → core/visibility.py} +0 -0
- /flock/{system_artifacts.py → models/system_artifacts.py} +0 -0
- /flock/{helper → utils}/cli_helper.py +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/WHEEL +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.11.dist-info → flock_core-0.5.20.dist-info}/licenses/LICENSE +0 -0
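Most of this release is a package restructuring rather than new behavior: the old top-level modules move into flock.core, flock.api, flock.components, flock.orchestrator, flock.storage, and flock.utils. A minimal sketch of how downstream imports shift under the new layout, based only on the renames listed above; whether the old names are still re-exported from shorter paths is not visible in this diff, so treat these paths as an assumption to verify against your installed 0.5.20:

# Hypothetical import migration, 0.5.11 -> 0.5.20, derived from the file moves above.
# 0.5.11 layout (old module paths):
# import flock.store
# import flock.api_models
# import flock.artifact_collector

# 0.5.20 layout (new module paths per the renames):
import flock.core.store
import flock.api.models
import flock.orchestrator.artifact_collector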
flock/dashboard/routes/helpers.py (new file)
@@ -0,0 +1,340 @@
+"""Helper functions for dashboard routes."""
+
+from datetime import UTC, datetime, timedelta
+from typing import TYPE_CHECKING, Any
+
+
+if TYPE_CHECKING:
+    from flock.agent import Agent
+    from flock.agent.specification import Subscription
+    from flock.core import Flock
+    from flock.orchestrator.batch_accumulator import BatchEngine, BatchSpec
+    from flock.orchestrator.correlation_engine import CorrelationEngine
+
+
+def _get_correlation_groups(
+    engine: "CorrelationEngine",
+    agent_name: str,
+    subscription_index: int,
+) -> list[dict[str, Any]]:
+    """Extract correlation group state from CorrelationEngine.
+
+    Returns waiting state for all correlation groups for the given agent subscription.
+    Used by enhanced /api/agents endpoint to expose JoinSpec waiting state.
+
+    Args:
+        engine: CorrelationEngine instance from orchestrator
+        agent_name: Name of the agent
+        subscription_index: Index of the subscription (for agents with multiple subscriptions)
+
+    Returns:
+        List of correlation group states with progress metrics:
+        [
+            {
+                "correlation_key": "patient_123",
+                "created_at": "2025-10-13T14:30:00Z",
+                "elapsed_seconds": 45.2,
+                "expires_in_seconds": 254.8,  # For time windows
+                "expires_in_artifacts": 7,  # For count windows
+                "collected_types": {"XRayImage": 1, "LabResults": 0},
+                "required_types": {"XRayImage": 1, "LabResults": 1},
+                "waiting_for": ["LabResults"],
+                "is_complete": False,
+                "is_expired": False
+            },
+            ...
+        ]
+    """
+
+    pool_key = (agent_name, subscription_index)
+    groups = engine.correlation_groups.get(pool_key, {})
+
+    if not groups:
+        return []
+
+    now = datetime.now(UTC)
+    result = []
+
+    for corr_key, group in groups.items():
+        # Calculate elapsed time
+        if group.created_at_time:
+            created_at_time = group.created_at_time
+            if created_at_time.tzinfo is None:
+                created_at_time = created_at_time.replace(tzinfo=UTC)
+            elapsed = (now - created_at_time).total_seconds()
+        else:
+            elapsed = 0
+
+        # Calculate time remaining (for time windows)
+        expires_in_seconds = None
+        if isinstance(group.window_spec, timedelta):
+            window_seconds = group.window_spec.total_seconds()
+            expires_in_seconds = max(0, window_seconds - elapsed)
+
+        # Calculate artifact count remaining (for count windows)
+        expires_in_artifacts = None
+        if isinstance(group.window_spec, int):
+            artifacts_passed = engine.global_sequence - group.created_at_sequence
+            expires_in_artifacts = max(0, group.window_spec - artifacts_passed)
+
+        # Determine what we're waiting for
+        collected_types = {
+            type_name: len(group.waiting_artifacts.get(type_name, []))
+            for type_name in group.required_types
+        }
+
+        waiting_for = [
+            type_name
+            for type_name, required_count in group.type_counts.items()
+            if collected_types.get(type_name, 0) < required_count
+        ]
+
+        result.append({
+            "correlation_key": str(corr_key),
+            "created_at": group.created_at_time.isoformat()
+            if group.created_at_time
+            else None,
+            "elapsed_seconds": round(elapsed, 1),
+            "expires_in_seconds": round(expires_in_seconds, 1)
+            if expires_in_seconds is not None
+            else None,
+            "expires_in_artifacts": expires_in_artifacts,
+            "collected_types": collected_types,
+            "required_types": dict(group.type_counts),
+            "waiting_for": waiting_for,
+            "is_complete": group.is_complete(),
+            "is_expired": group.is_expired(engine.global_sequence),
+        })
+
+    return result
+
+
+def _get_batch_state(
+    engine: "BatchEngine",
+    agent_name: str,
+    subscription_index: int,
+    batch_spec: "BatchSpec",
+) -> dict[str, Any] | None:
+    """Extract batch state from BatchEngine.
+
+    Returns current batch accumulator state for the given agent subscription.
+    Used by enhanced /api/agents endpoint to expose BatchSpec waiting state.
+
+    Args:
+        engine: BatchEngine instance from orchestrator
+        agent_name: Name of the agent
+        subscription_index: Index of the subscription
+        batch_spec: BatchSpec configuration (needed for metrics)
+
+    Returns:
+        Batch state dict or None if no batch or batch is empty:
+        {
+            "created_at": "2025-10-13T14:30:00Z",
+            "elapsed_seconds": 12.5,
+            "items_collected": 18,
+            "items_target": 25,
+            "items_remaining": 7,
+            "timeout_seconds": 30,
+            "timeout_remaining_seconds": 17.5,
+            "will_flush": "on_size" | "on_timeout" | "unknown"
+        }
+    """
+
+    batch_key = (agent_name, subscription_index)
+    accumulator = engine.batches.get(batch_key)
+
+    # Return None if no batch or batch is empty
+    if not accumulator or not accumulator.artifacts:
+        return None
+
+    now = datetime.now(UTC)
+    # Ensure accumulator.created_at is timezone-aware
+    created_at = accumulator.created_at
+    if created_at.tzinfo is None:
+        created_at = created_at.replace(tzinfo=UTC)
+    elapsed = (now - created_at).total_seconds()
+
+    # Calculate items collected (needed for all batch types)
+    items_collected = len(accumulator.artifacts)
+    # For group batching, use _group_count if available
+    if hasattr(accumulator, "_group_count"):
+        items_collected = accumulator._group_count
+
+    result = {
+        "created_at": accumulator.created_at.isoformat(),
+        "elapsed_seconds": round(elapsed, 1),
+        "items_collected": items_collected,  # Always include for all batch types
+    }
+
+    # Size-based metrics (only if size threshold configured)
+    if batch_spec.size:
+        result["items_target"] = batch_spec.size
+        result["items_remaining"] = max(0, batch_spec.size - items_collected)
+    else:
+        # Timeout-only batches: no target
+        result["items_target"] = None
+        result["items_remaining"] = None
+
+    # Timeout-based metrics
+    if batch_spec.timeout:
+        timeout_seconds = batch_spec.timeout.total_seconds()
+        timeout_remaining = max(0, timeout_seconds - elapsed)
+
+        result["timeout_seconds"] = int(timeout_seconds)
+        result["timeout_remaining_seconds"] = round(timeout_remaining, 1)
+
+    # Determine what will trigger flush
+    if batch_spec.size and batch_spec.timeout:
+        # Hybrid: predict which will fire first based on progress percentages
+        items_collected = result["items_collected"]
+        items_target = result.get("items_target", 1)
+        timeout_remaining = result.get("timeout_remaining_seconds", 0)
+
+        # Calculate progress toward each threshold
+        size_progress = items_collected / items_target if items_target > 0 else 0
+        timeout_elapsed = elapsed
+        timeout_total = batch_spec.timeout.total_seconds()
+        time_progress = timeout_elapsed / timeout_total if timeout_total > 0 else 0
+
+        # Predict based on which threshold we're progressing toward faster
+        # If we're closer to size threshold (percentage-wise), predict size
+        # Otherwise predict timeout
+        if size_progress > time_progress:
+            result["will_flush"] = "on_size"
+        else:
+            result["will_flush"] = "on_timeout"
+    elif batch_spec.size:
+        result["will_flush"] = "on_size"
+    elif batch_spec.timeout:
+        result["will_flush"] = "on_timeout"
+
+    return result
+
+
+def _compute_agent_status(agent: "Agent", orchestrator: "Flock") -> str:
+    """Determine agent status based on waiting state.
+
+    Checks if agent is waiting for correlation or batch completion.
+    Used by enhanced /api/agents endpoint to show agent status.
+
+    Args:
+        agent: Agent instance
+        orchestrator: Flock orchestrator instance
+
+    Returns:
+        "ready" - Agent not waiting for anything
+        "waiting" - Agent has correlation groups or batches accumulating
+        "active" - Agent currently executing (future enhancement)
+    """
+    # Check if any subscription is waiting for correlation or batching
+    for idx, subscription in enumerate(agent.subscriptions):
+        if subscription.join:
+            pool_key = (agent.name, idx)
+            if pool_key in orchestrator._correlation_engine.correlation_groups:
+                groups = orchestrator._correlation_engine.correlation_groups[pool_key]
+                if groups:  # Has waiting correlation groups
+                    return "waiting"
+
+        if subscription.batch:
+            batch_key = (agent.name, idx)
+            if batch_key in orchestrator._batch_engine.batches:
+                accumulator = orchestrator._batch_engine.batches[batch_key]
+                if accumulator and accumulator.artifacts:
+                    return "waiting"
+
+    return "ready"
+
+
+def _build_logic_config(
+    agent: "Agent",
+    subscription: "Subscription",
+    idx: int,
+    orchestrator: "Flock",
+) -> dict[str, Any] | None:
+    """Build logic operations configuration for a subscription.
+
+    Phase 1.2: Extracts JoinSpec and BatchSpec configuration plus current
+    waiting state for agents using logic operations.
+
+    Args:
+        agent: Agent instance
+        subscription: Subscription to analyze
+        idx: Subscription index (for agents with multiple subscriptions)
+        orchestrator: Flock orchestrator instance
+
+    Returns:
+        Logic operations config dict or None if no join/batch:
+        {
+            "subscription_index": 0,
+            "subscription_types": ["XRayImage", "LabResults"],
+            "join": {...},  # JoinSpec config (if present)
+            "batch": {...},  # BatchSpec config (if present)
+            "waiting_state": {...}  # Current state (if waiting)
+        }
+    """
+    if not subscription.join and not subscription.batch:
+        return None
+
+    config = {
+        "subscription_index": idx,
+        "subscription_types": list(subscription.type_names),
+    }
+
+    # JoinSpec configuration
+    if subscription.join:
+        join_spec = subscription.join
+        window_type = "time" if isinstance(join_spec.within, timedelta) else "count"
+        window_value = (
+            int(join_spec.within.total_seconds())
+            if isinstance(join_spec.within, timedelta)
+            else join_spec.within
+        )
+
+        config["join"] = {
+            "correlation_strategy": "by_key",
+            "window_type": window_type,
+            "window_value": window_value,
+            "window_unit": "seconds" if window_type == "time" else "artifacts",
+            "required_types": list(subscription.type_names),
+            "type_counts": dict(subscription.type_counts),
+        }
+
+        # Get waiting state from CorrelationEngine
+        correlation_groups = _get_correlation_groups(
+            orchestrator._correlation_engine, agent.name, idx
+        )
+        if correlation_groups:
+            config["waiting_state"] = {
+                "is_waiting": True,
+                "correlation_groups": correlation_groups,
+            }
+
+    # BatchSpec configuration
+    if subscription.batch:
+        batch_spec = subscription.batch
+        strategy = (
+            "hybrid"
+            if batch_spec.size and batch_spec.timeout
+            else "size"
+            if batch_spec.size
+            else "timeout"
+        )
+
+        config["batch"] = {
+            "strategy": strategy,
+        }
+        if batch_spec.size:
+            config["batch"]["size"] = batch_spec.size
+        if batch_spec.timeout:
+            config["batch"]["timeout_seconds"] = int(batch_spec.timeout.total_seconds())
+
+        # Get waiting state from BatchEngine
+        batch_state = _get_batch_state(
+            orchestrator._batch_engine, agent.name, idx, batch_spec
+        )
+        if batch_state:
+            if "waiting_state" not in config:
+                config["waiting_state"] = {"is_waiting": True}
+            config["waiting_state"]["batch_state"] = batch_state
+
+    return config
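The helpers above are private to the dashboard routes, but their docstrings state the intended wiring ("Used by enhanced /api/agents endpoint"). A rough sketch of how a route handler might combine them; the orchestrator attribute `agents` and the payload shape are illustrative assumptions, not taken from this diff:

# Illustrative only: combines _compute_agent_status and _build_logic_config
# the way their docstrings describe. "orchestrator.agents" is an assumed attribute.
from typing import Any

from flock.dashboard.routes.helpers import _build_logic_config, _compute_agent_status


def build_agents_payload(orchestrator) -> list[dict[str, Any]]:
    payload = []
    for agent in orchestrator.agents:  # assumed iterable of Agent instances
        logic = [
            cfg
            for idx, sub in enumerate(agent.subscriptions)
            if (cfg := _build_logic_config(agent, sub, idx, orchestrator)) is not None
        ]
        payload.append({
            "name": agent.name,
            "status": _compute_agent_status(agent, orchestrator),
            "logic_operations": logic,
        })
    return payload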
flock/dashboard/routes/themes.py (new file)
@@ -0,0 +1,76 @@
+"""Theme management API routes for dashboard."""
+
+from pathlib import Path
+from typing import Any
+
+import toml
+from fastapi import FastAPI, HTTPException
+
+from flock.logging.logging import get_logger
+
+
+logger = get_logger("dashboard.routes.themes")
+
+
+def register_theme_routes(app: FastAPI) -> None:
+    """Register theme API endpoints for dashboard customization.
+
+    Args:
+        app: FastAPI application instance
+    """
+    themes_dir = Path(__file__).parent.parent.parent / "themes"
+
+    @app.get("/api/themes")
+    async def list_themes() -> dict[str, Any]:
+        """Get list of available theme names.
+
+        Returns:
+            {"themes": ["dracula", "nord", ...]}
+        """
+        try:
+            if not themes_dir.exists():
+                return {"themes": []}
+
+            theme_files = list(themes_dir.glob("*.toml"))
+            theme_names = sorted([f.stem for f in theme_files])
+
+            return {"themes": theme_names}
+        except Exception as e:
+            logger.exception(f"Failed to list themes: {e}")
+            raise HTTPException(status_code=500, detail=f"Failed to list themes: {e!s}")
+
+    @app.get("/api/themes/{theme_name}")
+    async def get_theme(theme_name: str) -> dict[str, Any]:
+        """Get theme data by name.
+
+        Args:
+            theme_name: Name of theme (without .toml extension)
+
+        Returns:
+            {
+                "name": "dracula",
+                "data": {
+                    "colors": {...}
+                }
+            }
+        """
+        try:
+            # Sanitize theme name to prevent path traversal
+            theme_name = theme_name.replace("/", "").replace("\\", "").replace("..", "")
+
+            theme_path = themes_dir / f"{theme_name}.toml"
+
+            if not theme_path.exists():
+                raise HTTPException(
+                    status_code=404, detail=f"Theme '{theme_name}' not found"
+                )
+
+            # Load TOML theme
+            theme_data = toml.load(theme_path)
+
+            return {"name": theme_name, "data": theme_data}
+        except HTTPException:
+            raise
+        except Exception as e:
+            logger.exception(f"Failed to load theme '{theme_name}': {e}")
+            raise HTTPException(status_code=500, detail=f"Failed to load theme: {e!s}")
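For reference, the new theme endpoints can be exercised in isolation by mounting them on a bare FastAPI app. A quick sketch using FastAPI's TestClient; it assumes fastapi, httpx, and toml are installed, and the theme names returned depend on which .toml files ship under flock/themes/ in this wheel:

# Smoke-test sketch for the theme routes added above.
from fastapi import FastAPI
from fastapi.testclient import TestClient

from flock.dashboard.routes.themes import register_theme_routes

app = FastAPI()
register_theme_routes(app)
client = TestClient(app)

# List bundled themes (empty list if the themes directory is missing).
names = client.get("/api/themes").json()["themes"]
print(names)

# Fetch the first theme, if any; unknown names return 404.
if names:
    theme = client.get(f"/api/themes/{names[0]}").json()
    print(theme["name"], list(theme["data"].keys()))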