flock-core 0.5.10__py3-none-any.whl → 0.5.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of flock-core has been flagged as potentially problematic; see the registry's advisory page for details.
- flock/api_models.py +285 -0
- flock/dashboard/service.py +44 -21
- flock/orchestrator.py +246 -9
- flock/service.py +119 -61
- flock/system_artifacts.py +33 -0
- {flock_core-0.5.10.dist-info → flock_core-0.5.11.dist-info}/METADATA +52 -58
- {flock_core-0.5.10.dist-info → flock_core-0.5.11.dist-info}/RECORD +10 -8
- {flock_core-0.5.10.dist-info → flock_core-0.5.11.dist-info}/WHEEL +0 -0
- {flock_core-0.5.10.dist-info → flock_core-0.5.11.dist-info}/entry_points.txt +0 -0
- {flock_core-0.5.10.dist-info → flock_core-0.5.11.dist-info}/licenses/LICENSE +0 -0
flock/api_models.py
ADDED
|
@@ -0,0 +1,285 @@
|
|
|
1
|
+
"""Pydantic response models for Flock REST API.
|
|
2
|
+
|
|
3
|
+
Provides proper OpenAPI schemas for all public API endpoints.
|
|
4
|
+
This improves API documentation and enables SDK generation.
|
|
5
|
+
|
|
6
|
+
All models maintain 100% backwards compatibility with existing wire format.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from datetime import datetime
|
|
10
|
+
from typing import Any, Literal
|
|
11
|
+
from uuid import UUID
|
|
12
|
+
|
|
13
|
+
from pydantic import BaseModel, Field
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
# ============================================================================
|
|
17
|
+
# Agent Models
|
|
18
|
+
# ============================================================================
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class AgentSubscription(BaseModel):
    """Subscription configuration for an agent."""

    # Artifact type names that can trigger this subscription.
    types: list[str] = Field(description="Artifact types this subscription consumes")
    # How multiple matched types are combined before firing ('all' vs 'any').
    mode: str = Field(description="Subscription mode (e.g., 'all', 'any')")
    # When matched artifacts are handed to the agent ('immediate' vs 'batch').
    delivery: str = Field(description="Delivery mode (e.g., 'immediate', 'batch')")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class Agent(BaseModel):
    """Single agent representation."""

    # Unique identifier used to address the agent via the API.
    name: str = Field(description="Unique name of the agent")
    description: str = Field(default="", description="Human-readable description")
    # What the agent consumes (its subscriptions) ...
    subscriptions: list[AgentSubscription] = Field(
        description="List of subscriptions this agent listens to"
    )
    # ... and what it can emit (artifact type names).
    outputs: list[str] = Field(description="Artifact types this agent can produce")
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
class AgentListResponse(BaseModel):
    """Response for GET /api/v1/agents."""

    # Full registry listing; no pagination on this endpoint.
    agents: list[Agent] = Field(description="List of all registered agents")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
# ============================================================================
|
|
47
|
+
# Artifact Models
|
|
48
|
+
# ============================================================================
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class VisibilityInfo(BaseModel):
    """Artifact visibility configuration."""

    # Visibility payloads carry kind-specific fields that are added
    # dynamically; without extra="allow" Pydantic v2 would silently drop
    # any field other than `kind` during validation.
    model_config = {"extra": "allow"}

    kind: str = Field(description="Visibility kind (e.g., 'Public', 'Private')")
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
class ArtifactBase(BaseModel):
    """Base artifact representation with common fields."""

    # Identity and content.
    id: str = Field(description="Unique artifact identifier (UUID)")
    type: str = Field(description="Artifact type name")
    payload: dict[str, Any] = Field(description="Artifact payload data")
    produced_by: str = Field(description="Name of agent/source that produced this")
    # Visibility is exposed both as the raw config dict and as a flat kind
    # string for convenience.
    visibility: dict[str, Any] = Field(description="Visibility configuration")
    visibility_kind: str = Field(description="Visibility kind (Public/Private/etc)")
    created_at: str = Field(
        description="Timestamp when artifact was created (ISO 8601)"
    )
    # Workflow-tracking metadata; optional on the wire.
    correlation_id: str | None = Field(
        None, description="Optional correlation ID for workflow tracking"
    )
    partition_key: str | None = Field(None, description="Optional partition key")
    tags: list[str] = Field(default_factory=list, description="List of tags")
    version: int = Field(description="Artifact version number")
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class ConsumptionRecord(BaseModel):
    """Record of an artifact being consumed by an agent."""

    # Which artifact was consumed, and by whom.
    artifact_id: str = Field(description="ID of the artifact that was consumed")
    consumer: str = Field(description="Name of the agent that consumed it")
    # Execution context of the consumption.
    run_id: str = Field(description="Run ID of the consumption")
    correlation_id: str = Field(description="Correlation ID of the consumption")
    consumed_at: str = Field(description="Timestamp of consumption (ISO 8601)")
|
|
86
|
+
|
|
87
|
+
|
|
88
|
+
class ArtifactWithConsumptions(ArtifactBase):
    """Artifact with consumption metadata included."""

    # Full per-consumption records ...
    consumptions: list[ConsumptionRecord] = Field(
        default_factory=list, description="List of consumption records"
    )
    # ... plus a deduplicated convenience list of consumer names.
    consumed_by: list[str] = Field(
        default_factory=list,
        description="List of unique agent names that consumed this artifact",
    )
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class PaginationInfo(BaseModel):
    """Pagination metadata."""

    # Echoes the request's window; `total` is the overall match count,
    # independent of limit/offset.
    limit: int = Field(description="Number of items per page")
    offset: int = Field(description="Offset into the result set")
    total: int = Field(description="Total number of items matching the query")
|
|
106
|
+
|
|
107
|
+
|
|
108
|
+
class ArtifactListResponse(BaseModel):
    """Response for GET /api/v1/artifacts."""

    # Items may be the richer ArtifactWithConsumptions variant when the
    # client requests embedded metadata.
    items: list[ArtifactBase | ArtifactWithConsumptions] = Field(
        description="List of artifacts (may include consumption data if embed_meta=true)"
    )
    pagination: PaginationInfo = Field(description="Pagination information")
|
|
115
|
+
|
|
116
|
+
|
|
117
|
+
class ArtifactPublishRequest(BaseModel):
    """Request body for POST /api/v1/artifacts."""

    type: str = Field(description="Artifact type name")
    # Payload defaults to an empty dict so type-only publishes are valid.
    payload: dict[str, Any] = Field(
        default_factory=dict, description="Artifact payload data"
    )
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class ArtifactPublishResponse(BaseModel):
    """Response for POST /api/v1/artifacts."""

    # Publishing is fire-and-accept; the only wire value is "accepted".
    status: Literal["accepted"] = Field(description="Publication status")
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
class ArtifactSummary(BaseModel):
    """Summary statistics for artifacts."""

    # NOTE(review): intentionally empty placeholder — not exported and not
    # referenced by ArtifactSummaryResponse (which uses a plain dict).
    # Define based on actual summary structure from store
    # This is a placeholder - update based on actual implementation
|
|
137
|
+
|
|
138
|
+
|
|
139
|
+
class ArtifactSummaryResponse(BaseModel):
    """Response for GET /api/v1/artifacts/summary."""

    # Untyped passthrough of the store's summary structure; see the
    # ArtifactSummary placeholder above for the intended future schema.
    summary: dict[str, Any] = Field(description="Summary statistics")
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
# ============================================================================
|
|
146
|
+
# Agent Run Models
|
|
147
|
+
# ============================================================================
|
|
148
|
+
|
|
149
|
+
|
|
150
|
+
class AgentRunInput(BaseModel):
    """Input artifact for agent run."""

    type: str = Field(description="Artifact type name")
    # Payload defaults to an empty dict so type-only inputs are valid.
    payload: dict[str, Any] = Field(
        default_factory=dict, description="Artifact payload data"
    )
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
class AgentRunRequest(BaseModel):
    """Request body for POST /api/v1/agents/{name}/run."""

    # Empty-input runs are allowed (defaults to no artifacts).
    inputs: list[AgentRunInput] = Field(
        default_factory=list, description="List of input artifacts"
    )
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
class ProducedArtifact(BaseModel):
    """Artifact produced by agent run."""

    # Slimmer than ArtifactBase: run responses omit visibility, tags,
    # timestamps, and versioning.
    id: str = Field(description="Artifact ID (UUID)")
    type: str = Field(description="Artifact type name")
    payload: dict[str, Any] = Field(description="Artifact payload data")
    produced_by: str = Field(description="Name of agent that produced this")
|
|
174
|
+
|
|
175
|
+
|
|
176
|
+
class AgentRunResponse(BaseModel):
    """Response for POST /api/v1/agents/{name}/run."""

    artifacts: list[ProducedArtifact] = Field(
        description="Artifacts produced by the agent run"
    )
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
# ============================================================================
|
|
185
|
+
# Schema Discovery Models
|
|
186
|
+
# ============================================================================
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
class ArtifactTypeSchema(BaseModel):
    """Schema information for an artifact type."""

    model_config = {"populate_by_name": True}  # Allow using 'schema' as field name

    name: str = Field(description="Type name")
    # Named `schema_` to avoid clashing with BaseModel.schema; the wire
    # format still uses "schema" via the alias.
    schema_: dict[str, Any] = Field(
        alias="schema", description="JSON Schema for this type"
    )
|
|
198
|
+
|
|
199
|
+
|
|
200
|
+
class ArtifactTypesResponse(BaseModel):
    """Response for GET /api/artifact-types."""

    artifact_types: list[ArtifactTypeSchema] = Field(
        description="List of all registered artifact types with their schemas"
    )
|
|
206
|
+
|
|
207
|
+
|
|
208
|
+
# ============================================================================
|
|
209
|
+
# Agent History Models
|
|
210
|
+
# ============================================================================
|
|
211
|
+
|
|
212
|
+
|
|
213
|
+
class AgentHistorySummary(BaseModel):
    """Summary of agent execution history."""

    agent_id: str = Field(description="Agent identifier")
    # Untyped passthrough of the history store's aggregate statistics.
    summary: dict[str, Any] = Field(description="History summary statistics")
|
|
218
|
+
|
|
219
|
+
|
|
220
|
+
# ============================================================================
|
|
221
|
+
# Correlation Status Models
|
|
222
|
+
# ============================================================================
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
class CorrelationStatusResponse(BaseModel):
    """Response for GET /api/v1/correlations/{correlation_id}/status."""

    correlation_id: str = Field(description="The correlation ID")
    # State is derived server-side: pending work wins over everything else,
    # then "not_found" (no artifacts), then "failed" (only errors).
    state: Literal["active", "completed", "failed", "not_found"] = Field(
        description="Workflow state: active (work pending), completed (success), failed (only errors), not_found (no artifacts)"
    )
    has_pending_work: bool = Field(
        description="Whether the orchestrator has pending work for this correlation"
    )
    artifact_count: int = Field(
        description="Total number of artifacts with this correlation_id"
    )
    error_count: int = Field(description="Number of WorkflowError artifacts")
    # Null when no artifacts exist yet for this correlation.
    started_at: str | None = Field(
        None, description="Timestamp of first artifact (ISO 8601)"
    )
    last_activity_at: str | None = Field(
        None, description="Timestamp of most recent artifact (ISO 8601)"
    )
|
|
245
|
+
|
|
246
|
+
|
|
247
|
+
# ============================================================================
|
|
248
|
+
# Health & Metrics Models
|
|
249
|
+
# ============================================================================
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class HealthResponse(BaseModel):
    """Response for GET /health."""

    # Endpoint only ever reports "ok"; unhealthy states surface as HTTP errors.
    status: Literal["ok"] = Field(description="Health status")
|
|
256
|
+
|
|
257
|
+
|
|
258
|
+
# Public API of this module. Every class defined above is exported;
# VisibilityInfo, ArtifactSummary, and AgentRunInput were previously
# missing even though AgentRunInput is referenced by the exported
# AgentRunRequest.
__all__ = [
    # Agent models
    "Agent",
    "AgentSubscription",
    "AgentListResponse",
    # Artifact models
    "ArtifactBase",
    "ArtifactWithConsumptions",
    "ArtifactListResponse",
    "ArtifactPublishRequest",
    "ArtifactPublishResponse",
    "ArtifactSummary",
    "ArtifactSummaryResponse",
    "PaginationInfo",
    "ConsumptionRecord",
    "VisibilityInfo",
    # Agent run models
    "AgentRunInput",
    "AgentRunRequest",
    "AgentRunResponse",
    "ProducedArtifact",
    # Schema discovery
    "ArtifactTypesResponse",
    "ArtifactTypeSchema",
    # History
    "AgentHistorySummary",
    # Correlation status
    "CorrelationStatusResponse",
    # Health
    "HealthResponse",
]
|
flock/dashboard/service.py
CHANGED
|
@@ -19,6 +19,7 @@ from fastapi.middleware.cors import CORSMiddleware
|
|
|
19
19
|
from fastapi.staticfiles import StaticFiles
|
|
20
20
|
from pydantic import ValidationError
|
|
21
21
|
|
|
22
|
+
from flock.api_models import ArtifactTypeSchema, ArtifactTypesResponse
|
|
22
23
|
from flock.dashboard.collector import DashboardEventCollector
|
|
23
24
|
from flock.dashboard.events import MessagePublishedEvent, VisibilitySpec
|
|
24
25
|
from flock.dashboard.graph_builder import GraphAssembler
|
|
@@ -61,6 +62,18 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
61
62
|
# Initialize base service
|
|
62
63
|
super().__init__(orchestrator)
|
|
63
64
|
|
|
65
|
+
# Add dashboard-specific tags to OpenAPI
|
|
66
|
+
self.app.openapi_tags.extend([
|
|
67
|
+
{
|
|
68
|
+
"name": "Dashboard UI",
|
|
69
|
+
"description": "**Internal endpoints** used by the Flock dashboard UI. Not intended for direct use.",
|
|
70
|
+
},
|
|
71
|
+
{
|
|
72
|
+
"name": "Schema Discovery",
|
|
73
|
+
"description": "Endpoints for discovering available artifact types and their schemas.",
|
|
74
|
+
},
|
|
75
|
+
])
|
|
76
|
+
|
|
64
77
|
# Initialize WebSocket manager and event collector
|
|
65
78
|
self.websocket_manager = websocket_manager or WebSocketManager()
|
|
66
79
|
self.event_collector = event_collector or DashboardEventCollector(
|
|
@@ -137,7 +150,11 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
137
150
|
|
|
138
151
|
if self.graph_assembler is not None:
|
|
139
152
|
|
|
140
|
-
@app.post(
|
|
153
|
+
@app.post(
|
|
154
|
+
"/api/dashboard/graph",
|
|
155
|
+
response_model=GraphSnapshot,
|
|
156
|
+
tags=["Dashboard UI"],
|
|
157
|
+
)
|
|
141
158
|
async def get_dashboard_graph(request: GraphRequest) -> GraphSnapshot:
|
|
142
159
|
"""Return server-side assembled dashboard graph snapshot."""
|
|
143
160
|
return await self.graph_assembler.build_snapshot(request)
|
|
@@ -174,8 +191,12 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
174
191
|
app = self.app
|
|
175
192
|
orchestrator = self.orchestrator
|
|
176
193
|
|
|
177
|
-
@app.get(
|
|
178
|
-
|
|
194
|
+
@app.get(
|
|
195
|
+
"/api/artifact-types",
|
|
196
|
+
response_model=ArtifactTypesResponse,
|
|
197
|
+
tags=["Schema Discovery"],
|
|
198
|
+
)
|
|
199
|
+
async def get_artifact_types() -> ArtifactTypesResponse:
|
|
179
200
|
"""Get all registered artifact types with their schemas.
|
|
180
201
|
|
|
181
202
|
Returns:
|
|
@@ -196,13 +217,15 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
196
217
|
model_class = type_registry.resolve(type_name)
|
|
197
218
|
# Get Pydantic schema
|
|
198
219
|
schema = model_class.model_json_schema()
|
|
199
|
-
artifact_types.append(
|
|
220
|
+
artifact_types.append(
|
|
221
|
+
ArtifactTypeSchema(name=type_name, schema=schema)
|
|
222
|
+
)
|
|
200
223
|
except Exception as e:
|
|
201
224
|
logger.warning(f"Could not get schema for {type_name}: {e}")
|
|
202
225
|
|
|
203
|
-
return
|
|
226
|
+
return ArtifactTypesResponse(artifact_types=artifact_types)
|
|
204
227
|
|
|
205
|
-
@app.get("/api/agents")
|
|
228
|
+
@app.get("/api/agents", tags=["Dashboard UI"])
|
|
206
229
|
async def get_agents() -> dict[str, Any]:
|
|
207
230
|
"""Get all registered agents with logic operations state.
|
|
208
231
|
|
|
@@ -269,7 +292,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
269
292
|
|
|
270
293
|
return {"agents": agents}
|
|
271
294
|
|
|
272
|
-
@app.get("/api/version")
|
|
295
|
+
@app.get("/api/version", tags=["Dashboard UI"])
|
|
273
296
|
async def get_version() -> dict[str, str]:
|
|
274
297
|
"""Get version information for the backend and dashboard.
|
|
275
298
|
|
|
@@ -287,7 +310,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
287
310
|
|
|
288
311
|
return {"backend_version": backend_version, "package_name": "flock-flow"}
|
|
289
312
|
|
|
290
|
-
@app.post("/api/control/publish")
|
|
313
|
+
@app.post("/api/control/publish", tags=["Dashboard UI"])
|
|
291
314
|
async def publish_artifact(body: dict[str, Any]) -> dict[str, str]:
|
|
292
315
|
"""Publish artifact with correlation tracking.
|
|
293
316
|
|
|
@@ -363,7 +386,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
363
386
|
logger.exception(f"Error publishing artifact: {e}")
|
|
364
387
|
raise HTTPException(status_code=500, detail=str(e))
|
|
365
388
|
|
|
366
|
-
@app.post("/api/control/invoke")
|
|
389
|
+
@app.post("/api/control/invoke", tags=["Dashboard UI"])
|
|
367
390
|
async def invoke_agent(body: dict[str, Any]) -> dict[str, Any]:
|
|
368
391
|
"""Directly invoke a specific agent.
|
|
369
392
|
|
|
@@ -447,7 +470,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
447
470
|
logger.exception(f"Error invoking agent: {e}")
|
|
448
471
|
raise HTTPException(status_code=500, detail=str(e))
|
|
449
472
|
|
|
450
|
-
@app.post("/api/control/pause")
|
|
473
|
+
@app.post("/api/control/pause", tags=["Dashboard UI"])
|
|
451
474
|
async def pause_orchestrator() -> dict[str, Any]:
|
|
452
475
|
"""Pause orchestrator (placeholder).
|
|
453
476
|
|
|
@@ -458,7 +481,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
458
481
|
status_code=501, detail="Pause functionality coming in Phase 12"
|
|
459
482
|
)
|
|
460
483
|
|
|
461
|
-
@app.post("/api/control/resume")
|
|
484
|
+
@app.post("/api/control/resume", tags=["Dashboard UI"])
|
|
462
485
|
async def resume_orchestrator() -> dict[str, Any]:
|
|
463
486
|
"""Resume orchestrator (placeholder).
|
|
464
487
|
|
|
@@ -469,7 +492,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
469
492
|
status_code=501, detail="Resume functionality coming in Phase 12"
|
|
470
493
|
)
|
|
471
494
|
|
|
472
|
-
@app.get("/api/traces")
|
|
495
|
+
@app.get("/api/traces", tags=["Dashboard UI"])
|
|
473
496
|
async def get_traces() -> list[dict[str, Any]]:
|
|
474
497
|
"""Get OpenTelemetry traces from DuckDB.
|
|
475
498
|
|
|
@@ -559,7 +582,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
559
582
|
logger.exception(f"Error reading traces from DuckDB: {e}")
|
|
560
583
|
return []
|
|
561
584
|
|
|
562
|
-
@app.get("/api/traces/services")
|
|
585
|
+
@app.get("/api/traces/services", tags=["Dashboard UI"])
|
|
563
586
|
async def get_trace_services() -> dict[str, Any]:
|
|
564
587
|
"""Get list of unique services that have been traced.
|
|
565
588
|
|
|
@@ -605,7 +628,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
605
628
|
logger.exception(f"Error reading trace services: {e}")
|
|
606
629
|
return {"services": [], "operations": []}
|
|
607
630
|
|
|
608
|
-
@app.post("/api/traces/clear")
|
|
631
|
+
@app.post("/api/traces/clear", tags=["Dashboard UI"])
|
|
609
632
|
async def clear_traces() -> dict[str, Any]:
|
|
610
633
|
"""Clear all traces from DuckDB database.
|
|
611
634
|
|
|
@@ -624,7 +647,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
624
647
|
|
|
625
648
|
return result
|
|
626
649
|
|
|
627
|
-
@app.post("/api/traces/query")
|
|
650
|
+
@app.post("/api/traces/query", tags=["Dashboard UI"])
|
|
628
651
|
async def execute_trace_query(request: dict[str, Any]) -> dict[str, Any]:
|
|
629
652
|
"""
|
|
630
653
|
Execute a DuckDB SQL query on the traces database.
|
|
@@ -705,7 +728,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
705
728
|
logger.exception(f"DuckDB query error: {e}")
|
|
706
729
|
return {"error": str(e), "results": [], "columns": []}
|
|
707
730
|
|
|
708
|
-
@app.get("/api/traces/stats")
|
|
731
|
+
@app.get("/api/traces/stats", tags=["Dashboard UI"])
|
|
709
732
|
async def get_trace_stats() -> dict[str, Any]:
|
|
710
733
|
"""Get statistics about the trace database.
|
|
711
734
|
|
|
@@ -795,7 +818,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
795
818
|
"database_size_mb": 0,
|
|
796
819
|
}
|
|
797
820
|
|
|
798
|
-
@app.get("/api/streaming-history/{agent_name}")
|
|
821
|
+
@app.get("/api/streaming-history/{agent_name}", tags=["Dashboard UI"])
|
|
799
822
|
async def get_streaming_history(agent_name: str) -> dict[str, Any]:
|
|
800
823
|
"""Get historical streaming output for a specific agent.
|
|
801
824
|
|
|
@@ -834,7 +857,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
834
857
|
status_code=500, detail=f"Failed to get streaming history: {e!s}"
|
|
835
858
|
)
|
|
836
859
|
|
|
837
|
-
@app.get("/api/artifacts/history/{node_id}")
|
|
860
|
+
@app.get("/api/artifacts/history/{node_id}", tags=["Dashboard UI"])
|
|
838
861
|
async def get_message_history(node_id: str) -> dict[str, Any]:
|
|
839
862
|
"""Get complete message history for a node (both produced and consumed).
|
|
840
863
|
|
|
@@ -931,7 +954,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
931
954
|
status_code=500, detail=f"Failed to get message history: {e!s}"
|
|
932
955
|
)
|
|
933
956
|
|
|
934
|
-
@app.get("/api/agents/{agent_id}/runs")
|
|
957
|
+
@app.get("/api/agents/{agent_id}/runs", tags=["Dashboard UI"])
|
|
935
958
|
async def get_agent_runs(agent_id: str) -> dict[str, Any]:
|
|
936
959
|
"""Get run history for an agent.
|
|
937
960
|
|
|
@@ -991,7 +1014,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
991
1014
|
app = self.app
|
|
992
1015
|
themes_dir = Path(__file__).parent.parent / "themes"
|
|
993
1016
|
|
|
994
|
-
@app.get("/api/themes")
|
|
1017
|
+
@app.get("/api/themes", tags=["Dashboard UI"])
|
|
995
1018
|
async def list_themes() -> dict[str, Any]:
|
|
996
1019
|
"""Get list of available theme names.
|
|
997
1020
|
|
|
@@ -1012,7 +1035,7 @@ class DashboardHTTPService(BlackboardHTTPService):
|
|
|
1012
1035
|
status_code=500, detail=f"Failed to list themes: {e!s}"
|
|
1013
1036
|
)
|
|
1014
1037
|
|
|
1015
|
-
@app.get("/api/themes/{theme_name}")
|
|
1038
|
+
@app.get("/api/themes/{theme_name}", tags=["Dashboard UI"])
|
|
1016
1039
|
async def get_theme(theme_name: str) -> dict[str, Any]:
|
|
1017
1040
|
"""Get theme data by name.
|
|
1018
1041
|
|
flock/orchestrator.py
CHANGED
|
@@ -11,7 +11,7 @@ from contextlib import asynccontextmanager
|
|
|
11
11
|
from datetime import UTC, datetime
|
|
12
12
|
from pathlib import Path
|
|
13
13
|
from typing import TYPE_CHECKING, Any
|
|
14
|
-
from uuid import uuid4
|
|
14
|
+
from uuid import UUID, uuid4
|
|
15
15
|
|
|
16
16
|
from opentelemetry import trace
|
|
17
17
|
from opentelemetry.trace import Status, StatusCode
|
|
@@ -137,6 +137,9 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
137
137
|
self.store: BlackboardStore = store or InMemoryBlackboardStore()
|
|
138
138
|
self._agents: dict[str, Agent] = {}
|
|
139
139
|
self._tasks: set[Task[Any]] = set()
|
|
140
|
+
self._correlation_tasks: dict[
|
|
141
|
+
UUID, set[Task[Any]]
|
|
142
|
+
] = {} # Track tasks by correlation_id
|
|
140
143
|
self._processed: set[tuple[str, str]] = set()
|
|
141
144
|
self._lock = asyncio.Lock()
|
|
142
145
|
self.metrics: dict[str, float] = {"artifacts_published": 0, "agent_runs": 0}
|
|
@@ -163,6 +166,9 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
163
166
|
self._batch_timeout_interval: float = 0.1 # Check every 100ms
|
|
164
167
|
# Phase 1.2: WebSocket manager for real-time dashboard events (set by serve())
|
|
165
168
|
self._websocket_manager: Any = None
|
|
169
|
+
# Dashboard server task and launcher (for non-blocking serve)
|
|
170
|
+
self._server_task: Task[None] | None = None
|
|
171
|
+
self._dashboard_launcher: Any = None
|
|
166
172
|
# Unified tracing support
|
|
167
173
|
self._workflow_span = None
|
|
168
174
|
self._auto_workflow_enabled = os.getenv(
|
|
@@ -242,6 +248,99 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
242
248
|
def agents(self) -> list[Agent]:
|
|
243
249
|
return list(self._agents.values())
|
|
244
250
|
|
|
251
|
+
async def get_correlation_status(self, correlation_id: str) -> dict[str, Any]:
|
|
252
|
+
"""Get the status of a workflow by correlation ID.
|
|
253
|
+
|
|
254
|
+
Args:
|
|
255
|
+
correlation_id: The correlation ID to check
|
|
256
|
+
|
|
257
|
+
Returns:
|
|
258
|
+
Dictionary containing workflow status information:
|
|
259
|
+
- state: "active" if work is pending, "completed" otherwise
|
|
260
|
+
- has_pending_work: True if orchestrator has pending work for this correlation
|
|
261
|
+
- artifact_count: Total number of artifacts with this correlation_id
|
|
262
|
+
- error_count: Number of WorkflowError artifacts
|
|
263
|
+
- started_at: Timestamp of first artifact (if any)
|
|
264
|
+
- last_activity_at: Timestamp of most recent artifact (if any)
|
|
265
|
+
"""
|
|
266
|
+
from uuid import UUID
|
|
267
|
+
|
|
268
|
+
try:
|
|
269
|
+
correlation_uuid = UUID(correlation_id)
|
|
270
|
+
except ValueError as exc:
|
|
271
|
+
raise ValueError(
|
|
272
|
+
f"Invalid correlation_id format: {correlation_id}"
|
|
273
|
+
) from exc
|
|
274
|
+
|
|
275
|
+
# Check if orchestrator has pending work for this correlation
|
|
276
|
+
# 1. Check active tasks for this correlation_id
|
|
277
|
+
has_active_tasks = correlation_uuid in self._correlation_tasks and bool(
|
|
278
|
+
self._correlation_tasks[correlation_uuid]
|
|
279
|
+
)
|
|
280
|
+
|
|
281
|
+
# 2. Check correlation groups (for agents with JoinSpec that haven't yielded yet)
|
|
282
|
+
has_pending_groups = False
|
|
283
|
+
for groups in self._correlation_engine.correlation_groups.values():
|
|
284
|
+
for group_key, group in groups.items():
|
|
285
|
+
# Check if this group belongs to our correlation
|
|
286
|
+
for type_name, artifacts in group.waiting_artifacts.items():
|
|
287
|
+
if any(
|
|
288
|
+
artifact.correlation_id == correlation_uuid
|
|
289
|
+
for artifact in artifacts
|
|
290
|
+
):
|
|
291
|
+
has_pending_groups = True
|
|
292
|
+
break
|
|
293
|
+
if has_pending_groups:
|
|
294
|
+
break
|
|
295
|
+
if has_pending_groups:
|
|
296
|
+
break
|
|
297
|
+
|
|
298
|
+
# Workflow has pending work if EITHER tasks are active OR groups are waiting
|
|
299
|
+
has_pending_work = has_active_tasks or has_pending_groups
|
|
300
|
+
|
|
301
|
+
# Query artifacts for this correlation
|
|
302
|
+
from flock.store import FilterConfig
|
|
303
|
+
|
|
304
|
+
filters = FilterConfig(correlation_id=correlation_id)
|
|
305
|
+
artifacts, total = await self.store.query_artifacts(
|
|
306
|
+
filters, limit=1000, offset=0
|
|
307
|
+
)
|
|
308
|
+
|
|
309
|
+
# Count errors
|
|
310
|
+
error_count = sum(
|
|
311
|
+
1
|
|
312
|
+
for artifact in artifacts
|
|
313
|
+
if artifact.type == "flock.system_artifacts.WorkflowError"
|
|
314
|
+
)
|
|
315
|
+
|
|
316
|
+
# Get timestamps
|
|
317
|
+
started_at = None
|
|
318
|
+
last_activity_at = None
|
|
319
|
+
if artifacts:
|
|
320
|
+
timestamps = [artifact.created_at for artifact in artifacts]
|
|
321
|
+
started_at = min(timestamps).isoformat()
|
|
322
|
+
last_activity_at = max(timestamps).isoformat()
|
|
323
|
+
|
|
324
|
+
# Determine state
|
|
325
|
+
if has_pending_work:
|
|
326
|
+
state = "active"
|
|
327
|
+
elif total == 0:
|
|
328
|
+
state = "not_found"
|
|
329
|
+
elif error_count > 0 and total == error_count:
|
|
330
|
+
state = "failed" # Only error artifacts exist
|
|
331
|
+
else:
|
|
332
|
+
state = "completed"
|
|
333
|
+
|
|
334
|
+
return {
|
|
335
|
+
"correlation_id": correlation_id,
|
|
336
|
+
"state": state,
|
|
337
|
+
"has_pending_work": has_pending_work,
|
|
338
|
+
"artifact_count": total,
|
|
339
|
+
"error_count": error_count,
|
|
340
|
+
"started_at": started_at,
|
|
341
|
+
"last_activity_at": last_activity_at,
|
|
342
|
+
}
|
|
343
|
+
|
|
245
344
|
# Component management -------------------------------------------------
|
|
246
345
|
|
|
247
346
|
def add_component(self, component: OrchestratorComponent) -> Flock:
|
|
@@ -742,6 +841,15 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
742
841
|
except asyncio.CancelledError:
|
|
743
842
|
pass
|
|
744
843
|
|
|
844
|
+
# Cancel background server task if running
|
|
845
|
+
if self._server_task and not self._server_task.done():
|
|
846
|
+
self._server_task.cancel()
|
|
847
|
+
try:
|
|
848
|
+
await self._server_task
|
|
849
|
+
except asyncio.CancelledError:
|
|
850
|
+
pass
|
|
851
|
+
# Note: _cleanup_server_callback will handle launcher.stop()
|
|
852
|
+
|
|
745
853
|
if self._mcp_manager is not None:
|
|
746
854
|
await self._mcp_manager.cleanup_all()
|
|
747
855
|
self._mcp_manager = None
|
|
@@ -757,14 +865,20 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
757
865
|
dashboard_v2: bool = False,
|
|
758
866
|
host: str = "127.0.0.1",
|
|
759
867
|
port: int = 8344,
|
|
760
|
-
|
|
761
|
-
|
|
868
|
+
blocking: bool = True,
|
|
869
|
+
) -> Task[None] | None:
|
|
870
|
+
"""Start HTTP service for the orchestrator.
|
|
762
871
|
|
|
763
872
|
Args:
|
|
764
873
|
dashboard: Enable real-time dashboard with WebSocket support (default: False)
|
|
765
874
|
dashboard_v2: Launch the new dashboard v2 frontend (implies dashboard=True)
|
|
766
875
|
host: Host to bind to (default: "127.0.0.1")
|
|
767
876
|
port: Port to bind to (default: 8344)
|
|
877
|
+
blocking: If True, blocks until server stops. If False, starts server
|
|
878
|
+
in background and returns task handle (default: True)
|
|
879
|
+
|
|
880
|
+
Returns:
|
|
881
|
+
None if blocking=True, or Task handle if blocking=False
|
|
768
882
|
|
|
769
883
|
Examples:
|
|
770
884
|
# Basic HTTP API (no dashboard) - runs until interrupted
|
|
@@ -772,7 +886,75 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
772
886
|
|
|
773
887
|
# With dashboard (WebSocket + browser launch) - runs until interrupted
|
|
774
888
|
await orchestrator.serve(dashboard=True)
|
|
889
|
+
|
|
890
|
+
# Non-blocking mode - start server in background
|
|
891
|
+
await orchestrator.serve(dashboard=True, blocking=False)
|
|
892
|
+
# Now you can publish messages and run other logic
|
|
893
|
+
await orchestrator.publish(my_message)
|
|
894
|
+
await orchestrator.run_until_idle()
|
|
775
895
|
"""
|
|
896
|
+
# If non-blocking, start server in background task
|
|
897
|
+
if not blocking:
|
|
898
|
+
self._server_task = asyncio.create_task(
|
|
899
|
+
self._serve_impl(
|
|
900
|
+
dashboard=dashboard,
|
|
901
|
+
dashboard_v2=dashboard_v2,
|
|
902
|
+
host=host,
|
|
903
|
+
port=port,
|
|
904
|
+
)
|
|
905
|
+
)
|
|
906
|
+
# Add cleanup callback
|
|
907
|
+
self._server_task.add_done_callback(self._cleanup_server_callback)
|
|
908
|
+
# Give server a moment to start
|
|
909
|
+
await asyncio.sleep(0.1)
|
|
910
|
+
return self._server_task
|
|
911
|
+
|
|
912
|
+
# Blocking mode - run server directly with cleanup
|
|
913
|
+
try:
|
|
914
|
+
await self._serve_impl(
|
|
915
|
+
dashboard=dashboard,
|
|
916
|
+
dashboard_v2=dashboard_v2,
|
|
917
|
+
host=host,
|
|
918
|
+
port=port,
|
|
919
|
+
)
|
|
920
|
+
finally:
|
|
921
|
+
# In blocking mode, manually cleanup dashboard launcher
|
|
922
|
+
if self._dashboard_launcher is not None:
|
|
923
|
+
self._dashboard_launcher.stop()
|
|
924
|
+
self._dashboard_launcher = None
|
|
925
|
+
return None
|
|
926
|
+
|
|
927
|
+
def _cleanup_server_callback(self, task: Task[None]) -> None:
|
|
928
|
+
"""Cleanup callback when background server task completes."""
|
|
929
|
+
# Stop dashboard launcher if it was started
|
|
930
|
+
if self._dashboard_launcher is not None:
|
|
931
|
+
try:
|
|
932
|
+
self._dashboard_launcher.stop()
|
|
933
|
+
except Exception as e:
|
|
934
|
+
self._logger.warning(f"Failed to stop dashboard launcher: {e}")
|
|
935
|
+
finally:
|
|
936
|
+
self._dashboard_launcher = None
|
|
937
|
+
|
|
938
|
+
# Clear server task reference
|
|
939
|
+
self._server_task = None
|
|
940
|
+
|
|
941
|
+
# Log any exceptions from the task
|
|
942
|
+
try:
|
|
943
|
+
exc = task.exception()
|
|
944
|
+
if exc and not isinstance(exc, asyncio.CancelledError):
|
|
945
|
+
self._logger.error(f"Server task failed: {exc}", exc_info=exc)
|
|
946
|
+
except asyncio.CancelledError:
|
|
947
|
+
pass # Normal cancellation
|
|
948
|
+
|
|
949
|
+
async def _serve_impl(
|
|
950
|
+
self,
|
|
951
|
+
*,
|
|
952
|
+
dashboard: bool = False,
|
|
953
|
+
dashboard_v2: bool = False,
|
|
954
|
+
host: str = "127.0.0.1",
|
|
955
|
+
port: int = 8344,
|
|
956
|
+
) -> None:
|
|
957
|
+
"""Internal implementation of serve() - actual server logic."""
|
|
776
958
|
if dashboard_v2:
|
|
777
959
|
dashboard = True
|
|
778
960
|
|
|
@@ -837,11 +1019,8 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
837
1019
|
self._dashboard_launcher = launcher
|
|
838
1020
|
|
|
839
1021
|
# Run service (blocking call)
|
|
840
|
-
|
|
841
|
-
|
|
842
|
-
finally:
|
|
843
|
-
# Cleanup on exit
|
|
844
|
-
launcher.stop()
|
|
1022
|
+
# Note: Cleanup is handled by serve() (blocking mode) or callback (non-blocking mode)
|
|
1023
|
+
await service.run_async(host=host, port=port)
|
|
845
1024
|
|
|
846
1025
|
# Scheduling -----------------------------------------------------------
|
|
847
1026
|
|
|
@@ -1419,6 +1598,24 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
1419
1598
|
)
|
|
1420
1599
|
self._tasks.add(task)
|
|
1421
1600
|
task.add_done_callback(self._tasks.discard)
|
|
1601
|
+
|
|
1602
|
+
# Track task by correlation_id for workflow status tracking
|
|
1603
|
+
correlation_id = artifacts[0].correlation_id if artifacts else None
|
|
1604
|
+
if correlation_id:
|
|
1605
|
+
if correlation_id not in self._correlation_tasks:
|
|
1606
|
+
self._correlation_tasks[correlation_id] = set()
|
|
1607
|
+
self._correlation_tasks[correlation_id].add(task)
|
|
1608
|
+
|
|
1609
|
+
# Clean up correlation tracking when task completes
|
|
1610
|
+
def cleanup_correlation(t: Task[Any]) -> None:
|
|
1611
|
+
if correlation_id in self._correlation_tasks:
|
|
1612
|
+
self._correlation_tasks[correlation_id].discard(t)
|
|
1613
|
+
# Remove empty sets to prevent memory leaks
|
|
1614
|
+
if not self._correlation_tasks[correlation_id]:
|
|
1615
|
+
del self._correlation_tasks[correlation_id]
|
|
1616
|
+
|
|
1617
|
+
task.add_done_callback(cleanup_correlation)
|
|
1618
|
+
|
|
1422
1619
|
return task
|
|
1423
1620
|
|
|
1424
1621
|
def _record_agent_run(self, agent: Agent) -> None:
|
|
@@ -1477,7 +1674,47 @@ class Flock(metaclass=AutoTracedMeta):
|
|
|
1477
1674
|
self._record_agent_run(agent)
|
|
1478
1675
|
|
|
1479
1676
|
# Phase 6: Execute agent (returns artifacts, doesn't publish)
|
|
1480
|
-
|
|
1677
|
+
# Wrap in try/catch to handle agent failures gracefully
|
|
1678
|
+
try:
|
|
1679
|
+
outputs = await agent.execute(ctx, artifacts)
|
|
1680
|
+
except asyncio.CancelledError:
|
|
1681
|
+
# Re-raise cancellations immediately (shutdown, user cancellation)
|
|
1682
|
+
# Do NOT treat these as errors - they're intentional interruptions
|
|
1683
|
+
self._logger.debug(
|
|
1684
|
+
f"Agent '{agent.name}' task cancelled (task={ctx.task_id})"
|
|
1685
|
+
)
|
|
1686
|
+
raise # Propagate cancellation so task.cancelled() == True
|
|
1687
|
+
except Exception as exc:
|
|
1688
|
+
# Agent already called component.on_error hooks before re-raising
|
|
1689
|
+
# Now orchestrator publishes error artifact and continues workflow
|
|
1690
|
+
from flock.system_artifacts import WorkflowError
|
|
1691
|
+
|
|
1692
|
+
error_artifact_data = WorkflowError(
|
|
1693
|
+
failed_agent=agent.name,
|
|
1694
|
+
error_type=type(exc).__name__,
|
|
1695
|
+
error_message=str(exc),
|
|
1696
|
+
timestamp=datetime.now(UTC),
|
|
1697
|
+
task_id=ctx.task_id,
|
|
1698
|
+
)
|
|
1699
|
+
|
|
1700
|
+
# Build and publish error artifact with correlation_id
|
|
1701
|
+
from flock.artifacts import ArtifactSpec
|
|
1702
|
+
|
|
1703
|
+
error_spec = ArtifactSpec.from_model(WorkflowError)
|
|
1704
|
+
error_artifact = error_spec.build(
|
|
1705
|
+
produced_by=f"orchestrator#{agent.name}",
|
|
1706
|
+
data=error_artifact_data.model_dump(),
|
|
1707
|
+
correlation_id=correlation_id,
|
|
1708
|
+
)
|
|
1709
|
+
|
|
1710
|
+
await self._persist_and_schedule(error_artifact)
|
|
1711
|
+
|
|
1712
|
+
# Log error but don't re-raise - workflow continues
|
|
1713
|
+
self._logger.error(
|
|
1714
|
+
f"Agent '{agent.name}' failed (task={ctx.task_id}): {exc}",
|
|
1715
|
+
exc_info=True,
|
|
1716
|
+
)
|
|
1717
|
+
return # Exit early - no outputs to publish
|
|
1481
1718
|
|
|
1482
1719
|
# Phase 6: Orchestrator publishes outputs (security fix)
|
|
1483
1720
|
# This fixes Vulnerability #2 (WRITE Bypass) - agents can't bypass validation
|
flock/service.py
CHANGED
|
@@ -10,6 +10,20 @@ from uuid import UUID
|
|
|
10
10
|
from fastapi import FastAPI, HTTPException, Query
|
|
11
11
|
from fastapi.responses import PlainTextResponse
|
|
12
12
|
|
|
13
|
+
from flock.api_models import (
|
|
14
|
+
Agent,
|
|
15
|
+
AgentListResponse,
|
|
16
|
+
AgentRunRequest,
|
|
17
|
+
AgentRunResponse,
|
|
18
|
+
AgentSubscription,
|
|
19
|
+
ArtifactListResponse,
|
|
20
|
+
ArtifactPublishRequest,
|
|
21
|
+
ArtifactPublishResponse,
|
|
22
|
+
ArtifactSummaryResponse,
|
|
23
|
+
CorrelationStatusResponse,
|
|
24
|
+
HealthResponse,
|
|
25
|
+
ProducedArtifact,
|
|
26
|
+
)
|
|
13
27
|
from flock.registry import type_registry
|
|
14
28
|
from flock.store import ArtifactEnvelope, ConsumptionRecord, FilterConfig
|
|
15
29
|
|
|
@@ -21,7 +35,21 @@ if TYPE_CHECKING:
|
|
|
21
35
|
class BlackboardHTTPService:
|
|
22
36
|
def __init__(self, orchestrator: Flock) -> None:
|
|
23
37
|
self.orchestrator = orchestrator
|
|
24
|
-
self.app = FastAPI(
|
|
38
|
+
self.app = FastAPI(
|
|
39
|
+
title="Flock REST API Documentation",
|
|
40
|
+
version="1.0.0",
|
|
41
|
+
description="RESTful API for interacting with Flock agents and artifacts",
|
|
42
|
+
openapi_tags=[
|
|
43
|
+
{
|
|
44
|
+
"name": "Public API",
|
|
45
|
+
"description": "**Production-ready endpoints** for publishing artifacts, running agents, and querying data. Use these in your applications.",
|
|
46
|
+
},
|
|
47
|
+
{
|
|
48
|
+
"name": "Health & Metrics",
|
|
49
|
+
"description": "Monitoring endpoints for health checks and metrics collection.",
|
|
50
|
+
},
|
|
51
|
+
],
|
|
52
|
+
)
|
|
25
53
|
self._register_routes()
|
|
26
54
|
|
|
27
55
|
def _register_routes(self) -> None:
|
|
@@ -92,19 +120,25 @@ class BlackboardHTTPService:
|
|
|
92
120
|
end=_parse_datetime(end, "to"),
|
|
93
121
|
)
|
|
94
122
|
|
|
95
|
-
@app.post(
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
123
|
+
@app.post(
|
|
124
|
+
"/api/v1/artifacts",
|
|
125
|
+
response_model=ArtifactPublishResponse,
|
|
126
|
+
tags=["Public API"],
|
|
127
|
+
)
|
|
128
|
+
async def publish_artifact(
|
|
129
|
+
body: ArtifactPublishRequest,
|
|
130
|
+
) -> ArtifactPublishResponse:
|
|
101
131
|
try:
|
|
102
|
-
await orchestrator.publish({"type":
|
|
132
|
+
await orchestrator.publish({"type": body.type, **body.payload})
|
|
103
133
|
except Exception as exc: # pragma: no cover - FastAPI converts
|
|
104
134
|
raise HTTPException(status_code=400, detail=str(exc)) from exc
|
|
105
|
-
return
|
|
135
|
+
return ArtifactPublishResponse(status="accepted")
|
|
106
136
|
|
|
107
|
-
@app.get(
|
|
137
|
+
@app.get(
|
|
138
|
+
"/api/v1/artifacts",
|
|
139
|
+
response_model=ArtifactListResponse,
|
|
140
|
+
tags=["Public API"],
|
|
141
|
+
)
|
|
108
142
|
async def list_artifacts(
|
|
109
143
|
type_names: list[str] | None = Query(None, alias="type"),
|
|
110
144
|
produced_by: list[str] | None = Query(None),
|
|
@@ -116,7 +150,7 @@ class BlackboardHTTPService:
|
|
|
116
150
|
limit: int = Query(50, ge=1, le=500),
|
|
117
151
|
offset: int = Query(0, ge=0),
|
|
118
152
|
embed_meta: bool = Query(False, alias="embed_meta"),
|
|
119
|
-
) ->
|
|
153
|
+
) -> ArtifactListResponse:
|
|
120
154
|
filters = _make_filter_config(
|
|
121
155
|
type_names,
|
|
122
156
|
produced_by,
|
|
@@ -140,12 +174,16 @@ class BlackboardHTTPService:
|
|
|
140
174
|
)
|
|
141
175
|
else:
|
|
142
176
|
items.append(_serialize_artifact(artifact))
|
|
143
|
-
return
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
177
|
+
return ArtifactListResponse(
|
|
178
|
+
items=items,
|
|
179
|
+
pagination={"limit": limit, "offset": offset, "total": total},
|
|
180
|
+
)
|
|
147
181
|
|
|
148
|
-
@app.get(
|
|
182
|
+
@app.get(
|
|
183
|
+
"/api/v1/artifacts/summary",
|
|
184
|
+
response_model=ArtifactSummaryResponse,
|
|
185
|
+
tags=["Public API"],
|
|
186
|
+
)
|
|
149
187
|
async def summarize_artifacts(
|
|
150
188
|
type_names: list[str] | None = Query(None, alias="type"),
|
|
151
189
|
produced_by: list[str] | None = Query(None),
|
|
@@ -154,7 +192,7 @@ class BlackboardHTTPService:
|
|
|
154
192
|
start: str | None = Query(None, alias="from"),
|
|
155
193
|
end: str | None = Query(None, alias="to"),
|
|
156
194
|
visibility: list[str] | None = Query(None),
|
|
157
|
-
) ->
|
|
195
|
+
) -> ArtifactSummaryResponse:
|
|
158
196
|
filters = _make_filter_config(
|
|
159
197
|
type_names,
|
|
160
198
|
produced_by,
|
|
@@ -165,33 +203,30 @@ class BlackboardHTTPService:
|
|
|
165
203
|
end,
|
|
166
204
|
)
|
|
167
205
|
summary = await orchestrator.store.summarize_artifacts(filters)
|
|
168
|
-
return
|
|
206
|
+
return ArtifactSummaryResponse(summary=summary)
|
|
169
207
|
|
|
170
|
-
@app.get("/api/v1/artifacts/{artifact_id}")
|
|
208
|
+
@app.get("/api/v1/artifacts/{artifact_id}", tags=["Public API"])
|
|
171
209
|
async def get_artifact(artifact_id: UUID) -> dict[str, Any]:
|
|
172
210
|
artifact = await orchestrator.store.get(artifact_id)
|
|
173
211
|
if artifact is None:
|
|
174
212
|
raise HTTPException(status_code=404, detail="artifact not found")
|
|
175
213
|
return _serialize_artifact(artifact)
|
|
176
214
|
|
|
177
|
-
@app.post(
|
|
178
|
-
|
|
215
|
+
@app.post(
|
|
216
|
+
"/api/v1/agents/{name}/run",
|
|
217
|
+
response_model=AgentRunResponse,
|
|
218
|
+
tags=["Public API"],
|
|
219
|
+
)
|
|
220
|
+
async def run_agent(name: str, body: AgentRunRequest) -> AgentRunResponse:
|
|
179
221
|
try:
|
|
180
222
|
agent = orchestrator.get_agent(name)
|
|
181
223
|
except KeyError as exc:
|
|
182
224
|
raise HTTPException(status_code=404, detail="agent not found") from exc
|
|
183
225
|
|
|
184
|
-
inputs_data: list[dict[str, Any]] = body.get("inputs") or []
|
|
185
226
|
inputs = []
|
|
186
|
-
for item in
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
if not type_name:
|
|
190
|
-
raise HTTPException(
|
|
191
|
-
status_code=400, detail="Each input requires 'type'."
|
|
192
|
-
)
|
|
193
|
-
model = type_registry.resolve(type_name)
|
|
194
|
-
instance = model(**payload)
|
|
227
|
+
for item in body.inputs:
|
|
228
|
+
model = type_registry.resolve(item.type)
|
|
229
|
+
instance = model(**item.payload)
|
|
195
230
|
inputs.append(instance)
|
|
196
231
|
|
|
197
232
|
try:
|
|
@@ -201,40 +236,42 @@ class BlackboardHTTPService:
|
|
|
201
236
|
status_code=500, detail=f"Agent execution failed: {exc}"
|
|
202
237
|
) from exc
|
|
203
238
|
|
|
204
|
-
return
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
239
|
+
return AgentRunResponse(
|
|
240
|
+
artifacts=[
|
|
241
|
+
ProducedArtifact(
|
|
242
|
+
id=str(artifact.id),
|
|
243
|
+
type=artifact.type,
|
|
244
|
+
payload=artifact.payload,
|
|
245
|
+
produced_by=artifact.produced_by,
|
|
246
|
+
)
|
|
212
247
|
for artifact in outputs
|
|
213
248
|
]
|
|
214
|
-
|
|
249
|
+
)
|
|
215
250
|
|
|
216
|
-
@app.get(
|
|
217
|
-
|
|
218
|
-
|
|
219
|
-
|
|
220
|
-
|
|
221
|
-
|
|
222
|
-
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
|
|
227
|
-
|
|
228
|
-
|
|
251
|
+
@app.get(
|
|
252
|
+
"/api/v1/agents", response_model=AgentListResponse, tags=["Public API"]
|
|
253
|
+
)
|
|
254
|
+
async def list_agents() -> AgentListResponse:
|
|
255
|
+
return AgentListResponse(
|
|
256
|
+
agents=[
|
|
257
|
+
Agent(
|
|
258
|
+
name=agent.name,
|
|
259
|
+
description=agent.description or "",
|
|
260
|
+
subscriptions=[
|
|
261
|
+
AgentSubscription(
|
|
262
|
+
types=list(subscription.type_names),
|
|
263
|
+
mode=subscription.mode,
|
|
264
|
+
delivery=subscription.delivery,
|
|
265
|
+
)
|
|
229
266
|
for subscription in agent.subscriptions
|
|
230
267
|
],
|
|
231
|
-
|
|
232
|
-
|
|
268
|
+
outputs=[output.spec.type_name for output in agent.outputs],
|
|
269
|
+
)
|
|
233
270
|
for agent in orchestrator.agents
|
|
234
271
|
]
|
|
235
|
-
|
|
272
|
+
)
|
|
236
273
|
|
|
237
|
-
@app.get("/api/v1/agents/{agent_id}/history-summary")
|
|
274
|
+
@app.get("/api/v1/agents/{agent_id}/history-summary", tags=["Public API"])
|
|
238
275
|
async def agent_history(
|
|
239
276
|
agent_id: str,
|
|
240
277
|
type_names: list[str] | None = Query(None, alias="type"),
|
|
@@ -257,11 +294,32 @@ class BlackboardHTTPService:
|
|
|
257
294
|
summary = await orchestrator.store.agent_history_summary(agent_id, filters)
|
|
258
295
|
return {"agent_id": agent_id, "summary": summary}
|
|
259
296
|
|
|
260
|
-
@app.get(
|
|
261
|
-
|
|
262
|
-
|
|
297
|
+
@app.get(
|
|
298
|
+
"/api/v1/correlations/{correlation_id}/status",
|
|
299
|
+
response_model=CorrelationStatusResponse,
|
|
300
|
+
tags=["Public API"],
|
|
301
|
+
)
|
|
302
|
+
async def get_correlation_status(
|
|
303
|
+
correlation_id: str,
|
|
304
|
+
) -> CorrelationStatusResponse:
|
|
305
|
+
"""Get the status of a workflow by correlation ID.
|
|
306
|
+
|
|
307
|
+
Returns workflow state (active/completed/failed/not_found), pending work status,
|
|
308
|
+
artifact counts, error counts, and timestamps.
|
|
309
|
+
|
|
310
|
+
This endpoint is useful for polling to check if a workflow has completed.
|
|
311
|
+
"""
|
|
312
|
+
try:
|
|
313
|
+
status = await orchestrator.get_correlation_status(correlation_id)
|
|
314
|
+
return CorrelationStatusResponse(**status)
|
|
315
|
+
except ValueError as exc:
|
|
316
|
+
raise HTTPException(status_code=400, detail=str(exc)) from exc
|
|
317
|
+
|
|
318
|
+
@app.get("/health", response_model=HealthResponse, tags=["Health & Metrics"])
|
|
319
|
+
async def health() -> HealthResponse: # pragma: no cover - trivial
|
|
320
|
+
return HealthResponse(status="ok")
|
|
263
321
|
|
|
264
|
-
@app.get("/metrics")
|
|
322
|
+
@app.get("/metrics", tags=["Health & Metrics"])
|
|
265
323
|
async def metrics() -> PlainTextResponse:
|
|
266
324
|
lines = [
|
|
267
325
|
f"blackboard_{key} {value}"
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
"""System-level artifact types published by the Flock orchestrator.
|
|
2
|
+
|
|
3
|
+
These artifacts provide workflow telemetry and error tracking.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
|
|
8
|
+
from pydantic import BaseModel, Field
|
|
9
|
+
|
|
10
|
+
from flock.registry import flock_type
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
@flock_type
|
|
14
|
+
class WorkflowError(BaseModel):
|
|
15
|
+
"""Error artifact published when an agent execution fails.
|
|
16
|
+
|
|
17
|
+
This artifact is automatically published by the orchestrator when an agent
|
|
18
|
+
raises an exception during execution. It includes the correlation_id to enable
|
|
19
|
+
error tracking for workflows.
|
|
20
|
+
|
|
21
|
+
The workflow continues execution for other branches even when this is published.
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
failed_agent: str = Field(description="Name of the agent that failed")
|
|
25
|
+
error_type: str = Field(description="Type of exception that occurred")
|
|
26
|
+
error_message: str = Field(description="Error message from the exception")
|
|
27
|
+
timestamp: datetime = Field(description="When the error occurred")
|
|
28
|
+
task_id: str | None = Field(
|
|
29
|
+
default=None, description="Task ID of the failed execution"
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
__all__ = ["WorkflowError"]
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: flock-core
|
|
3
|
-
Version: 0.5.
|
|
3
|
+
Version: 0.5.11
|
|
4
4
|
Summary: Flock: A declrative framework for building and orchestrating AI agents.
|
|
5
5
|
Author-email: Andre Ratzenberger <andre.ratzenberger@whiteduck.de>
|
|
6
6
|
License: MIT
|
|
@@ -287,63 +287,6 @@ asyncio.run(main())
|
|
|
287
287
|
|
|
288
288
|
---
|
|
289
289
|
|
|
290
|
-
## Context Provider Primer (Security + Cost)
|
|
291
|
-
|
|
292
|
-
Context Providers are the security and efficiency layer between agents and the blackboard. They decide what each agent sees in its historical context and always enforce visibility rules.
|
|
293
|
-
|
|
294
|
-
Why use them
|
|
295
|
-
- Enforce access control (visibility is applied before engines see data)
|
|
296
|
-
- Cut context size (often 90%+) by filtering to what matters
|
|
297
|
-
- Specialize per agent or set a global default
|
|
298
|
-
|
|
299
|
-
Copy/paste examples
|
|
300
|
-
```python
|
|
301
|
-
from flock import Flock
|
|
302
|
-
from flock.context_provider import FilteredContextProvider
|
|
303
|
-
from flock.store import FilterConfig
|
|
304
|
-
|
|
305
|
-
# Global: show only urgent items (limit context size)
|
|
306
|
-
flock = Flock(
|
|
307
|
-
"openai/gpt-4.1",
|
|
308
|
-
context_provider=FilteredContextProvider(FilterConfig(tags={"urgent"}), limit=50)
|
|
309
|
-
)
|
|
310
|
-
|
|
311
|
-
# Per-agent: override global for a specific role
|
|
312
|
-
senior = flock.agent("senior").consumes(LogEntry).publishes(Analysis)
|
|
313
|
-
senior.context_provider = FilteredContextProvider(
|
|
314
|
-
FilterConfig(tags={"ERROR", "WARN"}),
|
|
315
|
-
limit=200
|
|
316
|
-
)
|
|
317
|
-
```
|
|
318
|
-
|
|
319
|
-
Learn more: docs/guides/context-providers.md
|
|
320
|
-
|
|
321
|
-
---
|
|
322
|
-
|
|
323
|
-
## Persistent Blackboard History
|
|
324
|
-
|
|
325
|
-
The in-memory store is still great for local tinkering, but production teams now have a durable option. Plugging in `SQLiteBlackboardStore` turns the blackboard into a persistent event log with first-class ergonomics:
|
|
326
|
-
|
|
327
|
-
- **Long-lived artifacts** — every field (payload, tags, partition keys, visibility) is stored for replay, audits, and postmortems
|
|
328
|
-
- **Historical APIs** — `/api/v1/artifacts`, `/summary`, and `/agents/{agent_id}/history-summary` expose pagination, filtering, and consumption counts
|
|
329
|
-
- **Dashboard module** — the new **Historical Blackboard** experience preloads persisted history, enriches the graph with consumer metadata, and highlights retention windows
|
|
330
|
-
- **Operational tooling** — CLI helpers (`init-sqlite-store`, `sqlite-maintenance --delete-before ... --vacuum`) make schema setup and retention policies scriptable
|
|
331
|
-
|
|
332
|
-
Quick start:
|
|
333
|
-
|
|
334
|
-
```python
|
|
335
|
-
from flock import Flock
|
|
336
|
-
from flock.store import SQLiteBlackboardStore
|
|
337
|
-
|
|
338
|
-
store = SQLiteBlackboardStore(".flock/blackboard.db")
|
|
339
|
-
await store.ensure_schema()
|
|
340
|
-
flock = Flock("openai/gpt-4.1", store=store)
|
|
341
|
-
```
|
|
342
|
-
|
|
343
|
-
Run `examples/02-the-blackboard/01_persistent_pizza.py` to generate history, then launch `examples/03-the-dashboard/04_persistent_pizza_dashboard.py` and explore previous runs, consumption trails, and retention banners inside the dashboard.
|
|
344
|
-
|
|
345
|
-
---
|
|
346
|
-
|
|
347
290
|
## Core Concepts
|
|
348
291
|
|
|
349
292
|
### Typed Artifacts (The Vocabulary)
|
|
@@ -673,6 +616,28 @@ agent.context_provider = FilteredContextProvider(
|
|
|
673
616
|
|
|
674
617
|
**📖 [Learn more: Context Providers Guide](https://whiteducksoftware.github.io/flock/guides/context-providers/) | [Steal production code →](examples/08-context-provider/)**
|
|
675
618
|
|
|
619
|
+
### Persistent Blackboard History
|
|
620
|
+
|
|
621
|
+
The in-memory store is great for local development, but production teams need durability. The `SQLiteBlackboardStore` turns the blackboard into a persistent event log with first-class ergonomics:
|
|
622
|
+
|
|
623
|
+
**What you get:**
|
|
624
|
+
- **Long-lived artifacts** — Every field (payload, tags, partition keys, visibility) stored for replay, audits, and postmortems
|
|
625
|
+
- **Historical APIs** — `/api/v1/artifacts`, `/summary`, and `/agents/{agent_id}/history-summary` expose pagination, filtering, and consumption counts
|
|
626
|
+
- **Dashboard integration** — The **Historical Blackboard** view preloads persisted history, enriches the graph with consumer metadata, and highlights retention windows
|
|
627
|
+
- **Operational tooling** — CLI helpers (`init-sqlite-store`, `sqlite-maintenance --delete-before ... --vacuum`) make schema setup and retention policies scriptable
|
|
628
|
+
|
|
629
|
+
**Quick start:**
|
|
630
|
+
```python
|
|
631
|
+
from flock import Flock
|
|
632
|
+
from flock.store import SQLiteBlackboardStore
|
|
633
|
+
|
|
634
|
+
store = SQLiteBlackboardStore(".flock/blackboard.db")
|
|
635
|
+
await store.ensure_schema()
|
|
636
|
+
flock = Flock("openai/gpt-4.1", store=store)
|
|
637
|
+
```
|
|
638
|
+
|
|
639
|
+
**Try it:** Run `examples/02-the-blackboard/01_persistent_pizza.py` to generate history, then launch `examples/03-the-dashboard/04_persistent_pizza_dashboard.py` to explore previous runs, consumption trails, and retention banners.
|
|
640
|
+
|
|
676
641
|
### Batching Pattern: Parallel Execution Control
|
|
677
642
|
|
|
678
643
|
**A key differentiator:** The separation of `publish()` and `run_until_idle()` enables parallel execution.
|
|
@@ -800,6 +765,35 @@ agent.best_of(150, ...) # ⚠️ Warns: "best_of(150) is very high - high LLM c
|
|
|
800
765
|
|
|
801
766
|
## Production-Ready Observability
|
|
802
767
|
|
|
768
|
+
### Sophisticated REST API
|
|
769
|
+
|
|
770
|
+
**Production-ready HTTP endpoints with comprehensive OpenAPI documentation:**
|
|
771
|
+
|
|
772
|
+
Flock includes a fully-featured REST API for programmatic access to the blackboard, agents, and workflow orchestration. Perfect for integration with external systems, building custom UIs, or monitoring production deployments.
|
|
773
|
+
|
|
774
|
+
**Key endpoints:**
|
|
775
|
+
- `POST /api/v1/artifacts` - Publish artifacts to the blackboard
|
|
776
|
+
- `GET /api/v1/artifacts` - Query artifacts with filtering, pagination, and consumption metadata
|
|
777
|
+
- `POST /api/v1/agents/{name}/run` - Direct agent invocation
|
|
778
|
+
- `GET /api/v1/correlations/{correlation_id}/status` - Workflow completion tracking
|
|
779
|
+
- `GET /api/v1/agents` - List all registered agents with subscriptions
|
|
780
|
+
- `GET /health` and `GET /metrics` - Production monitoring
|
|
781
|
+
|
|
782
|
+
**Start the API server:**
|
|
783
|
+
```python
|
|
784
|
+
await flock.serve(dashboard=True) # API + Dashboard on port 8344
|
|
785
|
+
# API docs: http://localhost:8344/docs
|
|
786
|
+
```
|
|
787
|
+
|
|
788
|
+
**Features:**
|
|
789
|
+
- ✅ **OpenAPI 3.0** - Interactive documentation at `/docs`
|
|
790
|
+
- ✅ **Pydantic validation** - Type-safe request/response models
|
|
791
|
+
- ✅ **Correlation tracking** - Monitor workflow completion with polling
|
|
792
|
+
- ✅ **Consumption metadata** - Full artifact lineage and agent execution trails
|
|
793
|
+
- ✅ **Production monitoring** - Health checks and Prometheus-compatible metrics
|
|
794
|
+
|
|
795
|
+
**📖 [Explore the API →](http://localhost:8344/docs)** (start the server first!)
|
|
796
|
+
|
|
803
797
|
### Real-Time Dashboard
|
|
804
798
|
|
|
805
799
|
**Start the dashboard with one line:**
|
|
@@ -1,5 +1,6 @@
|
|
|
1
1
|
flock/__init__.py,sha256=fvp4ltfaAGmYliShuTY_XVIpOUN6bMXbWiBnwb1NBoM,310
|
|
2
2
|
flock/agent.py,sha256=luggPsY2pAnjt4TQ-S-ez1us5R2qk6UC-12ccjP6cMk,62607
|
|
3
|
+
flock/api_models.py,sha256=Ot8riWjpf4p94P5IsdGz1vfQOizwLFrqwyzqkZWJDjc,9536
|
|
3
4
|
flock/artifact_collector.py,sha256=j9p0qvu9IrmMTvOwdX3PGhEnj1-A3uMx0bMfG9zBdgo,6410
|
|
4
5
|
flock/artifacts.py,sha256=QIUlol5hzUZO_8SINCqsN_Xr8fHixsc01QbFV_WIIj0,2527
|
|
5
6
|
flock/batch_accumulator.py,sha256=4ZZLERkpH0XzN87-z2b5Tyjubheo_yCxoXz4lsjxUuk,8017
|
|
@@ -8,13 +9,14 @@ flock/components.py,sha256=SpqFmYBLNIGkKkVrfGpi-9eFj7jKA0N8VJHL-HiyzRc,8092
|
|
|
8
9
|
flock/context_provider.py,sha256=Le01ZilMobaaPqZVJiBGFWkv1XnN3K6CtnPVD5Ewphk,20245
|
|
9
10
|
flock/correlation_engine.py,sha256=pFD1y13bSMZ9HpUpp6RHWr5vb39gd0mf-rRQIFI0R5Q,8055
|
|
10
11
|
flock/examples.py,sha256=z83GXjtroxRKplt57ur3v8UJFwf9cCHtZ3gj7QHiigs,3688
|
|
11
|
-
flock/orchestrator.py,sha256=
|
|
12
|
+
flock/orchestrator.py,sha256=UgHL6knHZy6PHRmL_GOL91xDr2wfgFv0yB7SKS8AIj4,78263
|
|
12
13
|
flock/orchestrator_component.py,sha256=lBn8WXiLzw3MXcU5JrNC3uH2SruLBCj8bSnSzH9txes,25210
|
|
13
14
|
flock/registry.py,sha256=eCSK1yOgcY53vzaNFG4qnlLP3H9R4NWi9fTq_WUNjus,4846
|
|
14
15
|
flock/runtime.py,sha256=DCClW_53ped_M0uMravPXmEGgVn_tOsyynNVYjZyM3c,10771
|
|
15
|
-
flock/service.py,sha256=
|
|
16
|
+
flock/service.py,sha256=6-j2QPmtWiniZtEAfvAzdsLdEGPAfCPuwcusXFZfs1A,13341
|
|
16
17
|
flock/store.py,sha256=hvecQqmx1xov7FyRCTwnyFJTpGb_7LHrHHQhGRWRUIE,46029
|
|
17
18
|
flock/subscription.py,sha256=2_0jCxOlk-G1jsD6tiVILmKdmx-Wm1qGYzCZVbz_ktg,5424
|
|
19
|
+
flock/system_artifacts.py,sha256=f4O-gErJRy5aMFVDh6ZGcj_5Gr130WnG28m5AfhnbxU,1083
|
|
18
20
|
flock/utilities.py,sha256=PNIIBrtvwAkW6cI_OxF2dPS151ZjDNbFOaCfsul2BpE,12473
|
|
19
21
|
flock/visibility.py,sha256=uwscg32t6Dp9LuA_EVDT5-_1lotzZWWS9Dl1foyJDxo,2926
|
|
20
22
|
flock/api/themes.py,sha256=7L-Bxhh8N1FXYsyTy7B_n8mSCGl_wDZXrvyY2llkF-A,1938
|
|
@@ -23,7 +25,7 @@ flock/dashboard/collector.py,sha256=zMDv1OYWrDz5obNVMrc7uLImUH5yWIYCaMB0h-Uu2kY,
|
|
|
23
25
|
flock/dashboard/events.py,sha256=ptKNaeHcgYnIO8rBTEK56ZC5w-dUWakXhNzDep1XaL4,7653
|
|
24
26
|
flock/dashboard/graph_builder.py,sha256=jdiaruce5uRybndlA4aiOXW-gKkHeGtnndzYQ1WyU9Y,32006
|
|
25
27
|
flock/dashboard/launcher.py,sha256=qnl1sFm9g2dAyzhyLPrfqAfld63G9aTNRHchgNuGnoo,8218
|
|
26
|
-
flock/dashboard/service.py,sha256
|
|
28
|
+
flock/dashboard/service.py,sha256=-ksQ12D--EAi52WPR3PgBpRig0QZL8Fd9mq5PMEDBrI,54791
|
|
27
29
|
flock/dashboard/websocket.py,sha256=qv2cbX9DBhglYgGGPFsD9ryehkBsIcJQGg7tJTIDyMM,9689
|
|
28
30
|
flock/dashboard/models/__init__.py,sha256=T4Yz8IXMm7lBqa2HLDSv7WJBtaKcdZIlTrz6GHNFZxs,68
|
|
29
31
|
flock/dashboard/models/graph.py,sha256=mQBzaogaOV1sss4aI3yNK8Kg4SdztPwIu3VQfrB3zLU,5444
|
|
@@ -531,8 +533,8 @@ flock/themes/zenburned.toml,sha256=UEmquBbcAO3Zj652XKUwCsNoC2iQSlIh-q5c6DH-7Kc,1
|
|
|
531
533
|
flock/themes/zenwritten-dark.toml,sha256=-dgaUfg1iCr5Dv4UEeHv_cN4GrPUCWAiHSxWK20X1kI,1663
|
|
532
534
|
flock/themes/zenwritten-light.toml,sha256=G1iEheCPfBNsMTGaVpEVpDzYBHA_T-MV27rolUYolmE,1666
|
|
533
535
|
flock/utility/output_utility_component.py,sha256=YXzVy4EZXTrz0o6DjlE6tna4N79T5S5b98qDS42KqXM,9316
|
|
534
|
-
flock_core-0.5.
|
|
535
|
-
flock_core-0.5.
|
|
536
|
-
flock_core-0.5.
|
|
537
|
-
flock_core-0.5.
|
|
538
|
-
flock_core-0.5.
|
|
536
|
+
flock_core-0.5.11.dist-info/METADATA,sha256=r87xPBNRpCHFwDSyKCVDLflsouPrAO5fXPh9BN3HlB0,51253
|
|
537
|
+
flock_core-0.5.11.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
|
|
538
|
+
flock_core-0.5.11.dist-info/entry_points.txt,sha256=UQdPmtHd97gSA_IdLt9MOd-1rrf_WO-qsQeIiHWVrp4,42
|
|
539
|
+
flock_core-0.5.11.dist-info/licenses/LICENSE,sha256=U3IZuTbC0yLj7huwJdldLBipSOHF4cPf6cUOodFiaBE,1072
|
|
540
|
+
flock_core-0.5.11.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|
|
File without changes
|