dacp 0.3.3__tar.gz → 0.3.4__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {dacp-0.3.3 → dacp-0.3.4}/PKG-INFO +8 -3
- {dacp-0.3.3 → dacp-0.3.4}/dacp/__init__.py +1 -1
- dacp-0.3.4/dacp/api.py +365 -0
- dacp-0.3.4/dacp/cli.py +249 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/intelligence.py +10 -28
- {dacp-0.3.3 → dacp-0.3.4}/dacp/json_parser.py +47 -49
- {dacp-0.3.3 → dacp-0.3.4}/dacp/logging_config.py +1 -3
- {dacp-0.3.3 → dacp-0.3.4}/dacp/orchestrator.py +10 -28
- {dacp-0.3.3 → dacp-0.3.4}/dacp/tools.py +1 -3
- {dacp-0.3.3 → dacp-0.3.4}/dacp/workflow.py +30 -25
- {dacp-0.3.3 → dacp-0.3.4}/dacp/workflow_runtime.py +148 -110
- {dacp-0.3.3 → dacp-0.3.4}/dacp.egg-info/PKG-INFO +8 -3
- {dacp-0.3.3 → dacp-0.3.4}/dacp.egg-info/SOURCES.txt +3 -0
- dacp-0.3.4/dacp.egg-info/entry_points.txt +2 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp.egg-info/requires.txt +8 -2
- {dacp-0.3.3 → dacp-0.3.4}/pyproject.toml +62 -10
- {dacp-0.3.3 → dacp-0.3.4}/tests/test_orchestrator.py +2 -6
- {dacp-0.3.3 → dacp-0.3.4}/tests/test_tools.py +1 -3
- {dacp-0.3.3 → dacp-0.3.4}/LICENSE +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/README.md +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/exceptions.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/llm.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/main.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/protocol.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp/types.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp.egg-info/dependency_links.txt +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/dacp.egg-info/top_level.txt +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/setup.cfg +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/tests/test_intelligence.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/tests/test_llm.py +0 -0
- {dacp-0.3.3 → dacp-0.3.4}/tests/test_protocol.py +0 -0
@@ -1,6 +1,6 @@
|
|
1
1
|
Metadata-Version: 2.4
|
2
2
|
Name: dacp
|
3
|
-
Version: 0.3.
|
3
|
+
Version: 0.3.4
|
4
4
|
Summary: Declarative Agent Communication Protocol - A protocol for managing LLM/agent communications and tool function calls
|
5
5
|
Author-email: Andrew Whitehouse <andrew.whitehouse@example.com>
|
6
6
|
License: MIT
|
@@ -26,20 +26,25 @@ Description-Content-Type: text/markdown
|
|
26
26
|
License-File: LICENSE
|
27
27
|
Requires-Dist: requests>=2.25.0
|
28
28
|
Requires-Dist: pyyaml>=5.4.0
|
29
|
+
Requires-Dist: pydantic>=2.0.0
|
29
30
|
Provides-Extra: openai
|
30
31
|
Requires-Dist: openai>=1.0.0; extra == "openai"
|
31
32
|
Provides-Extra: anthropic
|
32
33
|
Requires-Dist: anthropic>=0.18.0; extra == "anthropic"
|
33
34
|
Provides-Extra: local
|
34
35
|
Requires-Dist: requests>=2.25.0; extra == "local"
|
36
|
+
Provides-Extra: api
|
37
|
+
Requires-Dist: fastapi>=0.104.0; extra == "api"
|
38
|
+
Requires-Dist: uvicorn[standard]>=0.24.0; extra == "api"
|
35
39
|
Provides-Extra: all
|
36
40
|
Requires-Dist: openai>=1.0.0; extra == "all"
|
37
41
|
Requires-Dist: anthropic>=0.18.0; extra == "all"
|
42
|
+
Requires-Dist: fastapi>=0.104.0; extra == "all"
|
43
|
+
Requires-Dist: uvicorn[standard]>=0.24.0; extra == "all"
|
38
44
|
Provides-Extra: dev
|
39
45
|
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
40
46
|
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
41
|
-
Requires-Dist:
|
42
|
-
Requires-Dist: flake8>=4.0.0; extra == "dev"
|
47
|
+
Requires-Dist: ruff>=0.1.0; extra == "dev"
|
43
48
|
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
44
49
|
Requires-Dist: types-requests>=2.25.0; extra == "dev"
|
45
50
|
Requires-Dist: types-PyYAML>=6.0.0; extra == "dev"
|
dacp-0.3.4/dacp/api.py
ADDED
@@ -0,0 +1,365 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
DACP REST API Server
|
4
|
+
|
5
|
+
Provides HTTP endpoints for running workflows and managing agents.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import json
|
9
|
+
import logging
|
10
|
+
import time
|
11
|
+
import uuid
|
12
|
+
from pathlib import Path
|
13
|
+
from typing import Dict, Any, Optional
|
14
|
+
|
15
|
+
from fastapi import FastAPI, HTTPException, BackgroundTasks
|
16
|
+
from fastapi.middleware.cors import CORSMiddleware
|
17
|
+
from pydantic import BaseModel, Field
|
18
|
+
import uvicorn
|
19
|
+
|
20
|
+
import dacp
|
21
|
+
from dacp.workflow_runtime import WorkflowRuntime
|
22
|
+
|
23
|
+
|
24
|
+
# Configure logging
|
25
|
+
logging.basicConfig(level=logging.INFO)
|
26
|
+
logger = logging.getLogger("dacp.api")
|
27
|
+
|
28
|
+
|
29
|
+
# Pydantic models for API requests/responses
class WorkflowExecuteRequest(BaseModel):
    """Request body for POST /workflows/execute."""

    workflow_file: str = Field(..., description="Path to workflow YAML file")
    workflow_name: str = Field(..., description="Name of workflow to execute")
    input_data: Dict[str, Any] = Field(default_factory=dict, description="Input data for workflow")
    session_id: Optional[str] = Field(None, description="Custom session ID")
    wait_for_completion: bool = Field(True, description="Wait for workflow completion or return immediately")


class WorkflowStatusResponse(BaseModel):
    """Execution status of a single workflow run.

    Timestamps are epoch seconds (time.time()); the optional fields stay
    None until the run reaches the corresponding lifecycle stage.
    """

    workflow_id: str
    status: str  # one of: pending / running / completed / failed / cancelled
    created_at: float
    started_at: Optional[float]
    completed_at: Optional[float]
    duration: Optional[float]  # completed_at - started_at, seconds
    result: Optional[Dict[str, Any]]
    error: Optional[str]
    steps: Optional[Dict[str, Any]]


class WorkflowListResponse(BaseModel):
    """Contents of a workflow YAML file: workflow definitions and agent list."""

    workflows: Dict[str, Dict[str, Any]]
    agents: list


class HealthResponse(BaseModel):
    """Payload returned by GET /health."""

    status: str
    version: str
    uptime: float  # seconds since module import
    active_workflows: int
|
60
|
+
|
61
|
+
|
62
|
+
# Global state
app = FastAPI(
    title="DACP REST API",
    description="REST API for Declarative Agent Communication Protocol",
    # BUG FIX: the version was hard-coded ("0.3.3") and had drifted from the
    # actual package version; report the package's own version instead.
    version=dacp.__version__,
)

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Configure appropriately for production
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global runtime and session tracking (lazily initialized by get_runtime()).
runtime: Optional[WorkflowRuntime] = None
orchestrator: Optional[dacp.Orchestrator] = None
start_time = time.time()  # used by /health to report uptime
active_workflows: Dict[str, Dict[str, Any]] = {}  # workflow_id -> {status, request, created_at}
|
83
|
+
|
84
|
+
|
85
|
+
def get_runtime() -> WorkflowRuntime:
    """Return the process-wide WorkflowRuntime, creating it on first use."""
    global runtime, orchestrator

    if runtime is not None:
        return runtime

    # Lazy one-time initialization: a fresh orchestrator session per process.
    orchestrator = dacp.Orchestrator(session_id=f"api-{int(time.time())}")
    runtime = dacp.WorkflowRuntime(orchestrator=orchestrator)
    logger.info("🚀 DACP API runtime initialized")
    return runtime
|
95
|
+
|
96
|
+
|
97
|
+
@app.on_event("startup")
|
98
|
+
async def startup_event():
|
99
|
+
"""Initialize the API on startup."""
|
100
|
+
get_runtime()
|
101
|
+
logger.info("🎭 DACP REST API server started")
|
102
|
+
|
103
|
+
|
104
|
+
@app.get("/", response_model=Dict[str, str])
|
105
|
+
async def root():
|
106
|
+
"""Root endpoint with API information."""
|
107
|
+
return {
|
108
|
+
"message": "DACP REST API",
|
109
|
+
"version": "0.3.2",
|
110
|
+
"docs": "/docs",
|
111
|
+
"health": "/health"
|
112
|
+
}
|
113
|
+
|
114
|
+
|
115
|
+
@app.get("/health", response_model=HealthResponse)
|
116
|
+
async def health_check():
|
117
|
+
"""Health check endpoint."""
|
118
|
+
global active_workflows
|
119
|
+
|
120
|
+
return HealthResponse(
|
121
|
+
status="healthy",
|
122
|
+
version="0.3.3",
|
123
|
+
uptime=time.time() - start_time,
|
124
|
+
active_workflows=len(active_workflows)
|
125
|
+
)
|
126
|
+
|
127
|
+
|
128
|
+
@app.post("/workflows/execute", response_model=WorkflowStatusResponse)
|
129
|
+
async def execute_workflow(request: WorkflowExecuteRequest, background_tasks: BackgroundTasks):
|
130
|
+
"""Execute a workflow."""
|
131
|
+
try:
|
132
|
+
runtime = get_runtime()
|
133
|
+
|
134
|
+
# Validate workflow file exists
|
135
|
+
workflow_path = Path(request.workflow_file)
|
136
|
+
if not workflow_path.exists():
|
137
|
+
raise HTTPException(status_code=404, detail=f"Workflow file not found: {request.workflow_file}")
|
138
|
+
|
139
|
+
# Load workflow configuration
|
140
|
+
runtime.load_workflow_config(str(workflow_path))
|
141
|
+
|
142
|
+
# Generate workflow ID
|
143
|
+
workflow_id = str(uuid.uuid4())
|
144
|
+
|
145
|
+
# Create initial status
|
146
|
+
status = WorkflowStatusResponse(
|
147
|
+
workflow_id=workflow_id,
|
148
|
+
status="pending",
|
149
|
+
created_at=time.time(),
|
150
|
+
started_at=None,
|
151
|
+
completed_at=None,
|
152
|
+
duration=None,
|
153
|
+
result=None,
|
154
|
+
error=None,
|
155
|
+
steps=None
|
156
|
+
)
|
157
|
+
|
158
|
+
# Track active workflow
|
159
|
+
active_workflows[workflow_id] = {
|
160
|
+
"status": status,
|
161
|
+
"request": request,
|
162
|
+
"created_at": time.time()
|
163
|
+
}
|
164
|
+
|
165
|
+
if request.wait_for_completion:
|
166
|
+
# Execute synchronously
|
167
|
+
try:
|
168
|
+
status.started_at = time.time()
|
169
|
+
status.status = "running"
|
170
|
+
|
171
|
+
# Execute workflow
|
172
|
+
actual_workflow_id = runtime.execute_workflow(request.workflow_name, request.input_data)
|
173
|
+
|
174
|
+
# Get results
|
175
|
+
workflow_status = runtime.get_workflow_status(actual_workflow_id)
|
176
|
+
|
177
|
+
status.completed_at = time.time()
|
178
|
+
status.duration = status.completed_at - status.started_at
|
179
|
+
status.status = "completed"
|
180
|
+
status.result = workflow_status
|
181
|
+
|
182
|
+
# Clean up
|
183
|
+
if workflow_id in active_workflows:
|
184
|
+
del active_workflows[workflow_id]
|
185
|
+
|
186
|
+
logger.info(f"✅ Workflow {workflow_id} completed in {status.duration:.2f}s")
|
187
|
+
|
188
|
+
except Exception as e:
|
189
|
+
status.completed_at = time.time()
|
190
|
+
status.duration = status.completed_at - status.started_at
|
191
|
+
status.status = "failed"
|
192
|
+
status.error = str(e)
|
193
|
+
|
194
|
+
# Clean up
|
195
|
+
if workflow_id in active_workflows:
|
196
|
+
del active_workflows[workflow_id]
|
197
|
+
|
198
|
+
logger.error(f"❌ Workflow {workflow_id} failed: {e}")
|
199
|
+
|
200
|
+
else:
|
201
|
+
# Execute asynchronously
|
202
|
+
background_tasks.add_task(
|
203
|
+
execute_workflow_async,
|
204
|
+
workflow_id,
|
205
|
+
request,
|
206
|
+
runtime
|
207
|
+
)
|
208
|
+
logger.info(f"🚀 Workflow {workflow_id} queued for async execution")
|
209
|
+
|
210
|
+
return status
|
211
|
+
|
212
|
+
except Exception as e:
|
213
|
+
logger.error(f"❌ Workflow execution failed: {e}")
|
214
|
+
raise HTTPException(status_code=500, detail=str(e))
|
215
|
+
|
216
|
+
|
217
|
+
async def execute_workflow_async(workflow_id: str, request: WorkflowExecuteRequest, runtime: WorkflowRuntime):
    """Execute a workflow asynchronously.

    Background-task body scheduled by /workflows/execute when the caller did
    not wait for completion. Mutates the shared `active_workflows` entry in
    place so status polls observe progress, and removes the entry when the
    run finishes (successfully or not).
    """
    try:
        # Update status to running. Guarded: the entry may already have been
        # removed by the cancel endpoint.
        if workflow_id in active_workflows:
            active_workflows[workflow_id]["status"].started_at = time.time()
            active_workflows[workflow_id]["status"].status = "running"

        # Execute workflow (the runtime assigns its own execution ID).
        actual_workflow_id = runtime.execute_workflow(request.workflow_name, request.input_data)

        # Get results
        workflow_status = runtime.get_workflow_status(actual_workflow_id)

        # Update status
        if workflow_id in active_workflows:
            status = active_workflows[workflow_id]["status"]
            status.completed_at = time.time()
            status.duration = status.completed_at - status.started_at
            status.status = "completed"
            status.result = workflow_status

            # Clean up — once removed, the result is no longer retrievable
            # via the status endpoint (it 404s for finished workflows).
            del active_workflows[workflow_id]

        logger.info(f"✅ Async workflow {workflow_id} completed")

    except Exception as e:
        # Update status with error
        if workflow_id in active_workflows:
            status = active_workflows[workflow_id]["status"]
            status.completed_at = time.time()
            # NOTE(review): assumes started_at was set in the running branch
            # above; if the entry appeared after that branch this would
            # subtract None — confirm no such path exists.
            status.duration = status.completed_at - status.started_at
            status.status = "failed"
            status.error = str(e)

            # Clean up
            del active_workflows[workflow_id]

        logger.error(f"❌ Async workflow {workflow_id} failed: {e}")
|
257
|
+
|
258
|
+
|
259
|
+
@app.get("/workflows/{workflow_id}/status", response_model=WorkflowStatusResponse)
|
260
|
+
async def get_workflow_status(workflow_id: str):
|
261
|
+
"""Get the status of a workflow execution."""
|
262
|
+
if workflow_id in active_workflows:
|
263
|
+
return active_workflows[workflow_id]["status"]
|
264
|
+
else:
|
265
|
+
raise HTTPException(status_code=404, detail=f"Workflow {workflow_id} not found")
|
266
|
+
|
267
|
+
|
268
|
+
@app.get("/workflows", response_model=Dict[str, Any])
|
269
|
+
async def list_active_workflows():
|
270
|
+
"""List all active workflows."""
|
271
|
+
return {
|
272
|
+
"active_workflows": len(active_workflows),
|
273
|
+
"workflows": [
|
274
|
+
{
|
275
|
+
"workflow_id": wf_id,
|
276
|
+
"status": wf_data["status"].status,
|
277
|
+
"created_at": wf_data["created_at"],
|
278
|
+
"workflow_name": wf_data["request"].workflow_name
|
279
|
+
}
|
280
|
+
for wf_id, wf_data in active_workflows.items()
|
281
|
+
]
|
282
|
+
}
|
283
|
+
|
284
|
+
|
285
|
+
@app.get("/workflows/config/{workflow_file}", response_model=WorkflowListResponse)
|
286
|
+
async def get_workflow_config(workflow_file: str):
|
287
|
+
"""Get workflow configuration and list available workflows."""
|
288
|
+
try:
|
289
|
+
workflow_path = Path(workflow_file)
|
290
|
+
if not workflow_path.exists():
|
291
|
+
raise HTTPException(status_code=404, detail=f"Workflow file not found: {workflow_file}")
|
292
|
+
|
293
|
+
# Load workflow configuration
|
294
|
+
import yaml
|
295
|
+
with open(workflow_path, 'r') as f:
|
296
|
+
config = yaml.safe_load(f)
|
297
|
+
|
298
|
+
return WorkflowListResponse(
|
299
|
+
workflows=config.get('workflows', {}),
|
300
|
+
agents=config.get('agents', [])
|
301
|
+
)
|
302
|
+
|
303
|
+
except Exception as e:
|
304
|
+
logger.error(f"❌ Failed to load workflow config: {e}")
|
305
|
+
raise HTTPException(status_code=500, detail=str(e))
|
306
|
+
|
307
|
+
|
308
|
+
@app.delete("/workflows/{workflow_id}")
|
309
|
+
async def cancel_workflow(workflow_id: str):
|
310
|
+
"""Cancel a running workflow."""
|
311
|
+
if workflow_id not in active_workflows:
|
312
|
+
raise HTTPException(status_code=404, detail=f"Workflow {workflow_id} not found")
|
313
|
+
|
314
|
+
# Update status to cancelled
|
315
|
+
active_workflows[workflow_id]["status"].status = "cancelled"
|
316
|
+
active_workflows[workflow_id]["status"].completed_at = time.time()
|
317
|
+
|
318
|
+
# Clean up
|
319
|
+
del active_workflows[workflow_id]
|
320
|
+
|
321
|
+
logger.info(f"🚫 Workflow {workflow_id} cancelled")
|
322
|
+
return {"message": f"Workflow {workflow_id} cancelled"}
|
323
|
+
|
324
|
+
|
325
|
+
@app.get("/agents", response_model=Dict[str, Any])
|
326
|
+
async def list_agents():
|
327
|
+
"""List registered agents."""
|
328
|
+
runtime = get_runtime()
|
329
|
+
agents = runtime.agent_registry.list_agents()
|
330
|
+
|
331
|
+
agent_info = []
|
332
|
+
for agent_id in agents:
|
333
|
+
info = runtime.agent_registry.get_agent_info(agent_id)
|
334
|
+
if info:
|
335
|
+
agent_info.append(info)
|
336
|
+
|
337
|
+
return {
|
338
|
+
"agents": agent_info,
|
339
|
+
"total": len(agents)
|
340
|
+
}
|
341
|
+
|
342
|
+
|
343
|
+
def start_server(host: str = "0.0.0.0", port: int = 8000, reload: bool = False):
    """Start the DACP API server."""
    logger.info(f"🚀 Starting DACP REST API server on {host}:{port}")
    # Pass the app as an import string so uvicorn's reloader can re-import it.
    uvicorn.run("dacp.api:app", host=host, port=port, reload=reload, log_level="info")
|
354
|
+
|
355
|
+
|
356
|
+
if __name__ == "__main__":
|
357
|
+
import argparse
|
358
|
+
|
359
|
+
parser = argparse.ArgumentParser(description="DACP REST API Server")
|
360
|
+
parser.add_argument("--host", default="0.0.0.0", help="Host to bind to")
|
361
|
+
parser.add_argument("--port", type=int, default=8000, help="Port to bind to")
|
362
|
+
parser.add_argument("--reload", action="store_true", help="Enable auto-reload")
|
363
|
+
|
364
|
+
args = parser.parse_args()
|
365
|
+
start_server(args.host, args.port, args.reload)
|
dacp-0.3.4/dacp/cli.py
ADDED
@@ -0,0 +1,249 @@
|
|
1
|
+
#!/usr/bin/env python3
|
2
|
+
"""
|
3
|
+
DACP Command Line Interface
|
4
|
+
|
5
|
+
Provides a simple CLI for running workflows and managing agents.
|
6
|
+
"""
|
7
|
+
|
8
|
+
import argparse
|
9
|
+
import json
|
10
|
+
import sys
|
11
|
+
import yaml
|
12
|
+
from pathlib import Path
|
13
|
+
from typing import Dict, Any
|
14
|
+
|
15
|
+
import dacp
|
16
|
+
from dacp.workflow_runtime import WorkflowRuntime
|
17
|
+
|
18
|
+
|
19
|
+
def setup_logging(args):
    """Setup DACP logging based on CLI arguments."""
    options = {
        "level": args.log_level or "INFO",
        "format_style": args.log_style or "emoji",
        "include_timestamp": args.timestamp,
    }
    # Only forward a log file when one was actually requested.
    if args.log_file:
        options["log_file"] = args.log_file

    dacp.setup_dacp_logging(**options)
|
31
|
+
|
32
|
+
|
33
|
+
def load_workflow_config(workflow_path: str) -> Dict[str, Any]:
    """Load workflow configuration from YAML file."""
    workflow_file = Path(workflow_path)
    if not workflow_file.exists():
        raise FileNotFoundError(f"Workflow file not found: {workflow_path}")

    with workflow_file.open('r') as f:
        return yaml.safe_load(f)
|
41
|
+
|
42
|
+
|
43
|
+
def parse_input_data(input_args: list) -> Dict[str, Any]:
    """Parse input data from CLI arguments.

    Each argument of the form ``key=value`` is stored under ``key``; the
    value is JSON-decoded when possible and kept as a raw string otherwise.
    An argument without ``=`` becomes a key mapped to the empty string.
    """
    parsed: Dict[str, Any] = {}

    for raw in input_args:
        key, sep, value = raw.partition('=')
        if not sep:
            # Bare flag-style argument: record it with an empty string value.
            parsed[raw] = ""
            continue
        try:
            parsed[key] = json.loads(value)
        except json.JSONDecodeError:
            parsed[key] = value

    return parsed
|
60
|
+
|
61
|
+
|
62
|
+
def run_workflow(args):
    """Run a workflow with the given configuration.

    Loads the workflow YAML, registers any agents it declares, executes the
    named workflow with the parsed ``--input`` data, and prints or writes
    the resulting status. Returns 0 on success, 1 on any failure.
    """
    try:
        # Setup logging
        setup_logging(args)

        # Load workflow configuration
        workflow_config = load_workflow_config(args.workflow)

        # Create orchestrator and runtime
        orchestrator = dacp.Orchestrator(session_id=args.session_id)
        runtime = dacp.WorkflowRuntime(orchestrator=orchestrator)

        # Load workflow
        runtime.load_workflow_config(args.workflow)

        # Parse input data
        input_data = parse_input_data(args.input or [])

        # Load and register agents
        agents_config = workflow_config.get('agents', [])
        for agent_config in agents_config:
            agent_id = agent_config.get('id') or agent_config.get('name')  # Support both 'id' and 'name'
            agent_spec = agent_config.get('spec')

            if agent_id and agent_spec:
                print(f"📋 Loading agent: {agent_id} from {agent_spec}")

                # Check if spec is a Python module path (contains :)
                if ':' in agent_spec:
                    # Format: module.path:ClassName
                    try:
                        module_path, class_name = agent_spec.split(':')

                        # Convert module path to file path
                        # NOTE(review): this resolves relative to the current
                        # working directory, not the workflow file — confirm
                        # that is the intended contract.
                        file_path = module_path.replace('.', '/') + '.py'

                        # Load the agent using importlib
                        import importlib.util
                        # NOTE(review): spec_from_file_location can return
                        # None for a missing file; the resulting AttributeError
                        # is swallowed by the broad except below.
                        spec = importlib.util.spec_from_file_location(f"{agent_id}_module", file_path)
                        agent_module = importlib.util.module_from_spec(spec)
                        spec.loader.exec_module(agent_module)

                        # Get the agent class and instantiate it
                        agent_class = getattr(agent_module, class_name)
                        agent_instance = agent_class(agent_id=agent_id, orchestrator=orchestrator)

                        # Register with runtime
                        runtime.register_agent_from_config(agent_id, agent_instance)
                        print(f"✅ Agent {agent_id} loaded and registered")

                    except Exception as e:
                        # A failed agent load is reported but does not abort
                        # the whole run; remaining agents are still loaded.
                        print(f"❌ Failed to load agent {agent_id}: {e}")
                        continue

                else:
                    # Legacy YAML file approach (resolved relative to the
                    # workflow file's directory); not implemented yet.
                    agent_path = Path(args.workflow).parent / agent_spec
                    if agent_path.exists():
                        print(f"⚠️ YAML agent specs not yet supported: {agent_spec}")
                    else:
                        print(f"❌ Agent spec file not found: {agent_spec}")

        # Execute workflow
        print(f"🚀 Executing workflow: {args.workflow_name}")
        workflow_id = runtime.execute_workflow(args.workflow_name, input_data)

        # Get results
        status = runtime.get_workflow_status(workflow_id)

        if args.output:
            with open(args.output, 'w') as f:
                json.dump(status, f, indent=2)
            print(f"📄 Results saved to: {args.output}")
        else:
            print("📊 Workflow Results:")
            print(json.dumps(status, indent=2))

        return 0

    except Exception as e:
        print(f"❌ Workflow execution failed: {e}")
        return 1
|
145
|
+
|
146
|
+
|
147
|
+
def list_workflows(args):
    """List available workflows in a configuration file."""
    try:
        workflows = load_workflow_config(args.workflow).get('workflows', {})

        print(f"📋 Available workflows in {args.workflow}:")
        for name, config in workflows.items():
            description = config.get('description', 'No description')
            steps = len(config.get('steps', []))
            print(f" • {name}: {description} ({steps} steps)")

        return 0

    except Exception as e:
        print(f"❌ Failed to list workflows: {e}")
        return 1
|
164
|
+
|
165
|
+
|
166
|
+
def main():
    """Main CLI entry point.

    Builds the argument parser (run / list / version / serve subcommands),
    dispatches to the matching handler, and returns a process exit code
    (0 success, 1 failure or unknown command).
    """
    parser = argparse.ArgumentParser(
        description="DACP - Declarative Agent Communication Protocol CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Run a workflow
  dacp run workflow examples/github-actions-error-workflow.yaml \\
    --workflow-name quick_error_analysis \\
    --input job_name="build-and-test" \\
    --input raw_logs="npm ERR! code ENOENT..." \\
    --input repository="myorg/myproject"

  # List available workflows
  dacp list workflows examples/github-actions-error-workflow.yaml

  # Run with custom logging
  dacp run workflow workflow.yaml \\
    --log-level DEBUG \\
    --log-style detailed \\
    --output results.json
"""
    )

    subparsers = parser.add_subparsers(dest='command', help='Available commands')

    # Run workflow command ("run workflow <file> --workflow-name NAME ...")
    run_parser = subparsers.add_parser('run', help='Run workflows and agents')
    run_subparsers = run_parser.add_subparsers(dest='run_command', help='Run commands')

    workflow_parser = run_subparsers.add_parser('workflow', help='Run a workflow')
    workflow_parser.add_argument('workflow', help='Path to workflow YAML file')
    workflow_parser.add_argument('--workflow-name', required=True, help='Name of workflow to execute')
    workflow_parser.add_argument('--input', action='append', help='Input data (key=value or just value)')
    workflow_parser.add_argument('--output', help='Output file for results (JSON)')
    workflow_parser.add_argument('--session-id', help='Custom session ID')

    # Logging options
    workflow_parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'], help='Log level')
    workflow_parser.add_argument('--log-style', choices=['emoji', 'detailed', 'simple'], help='Log style')
    workflow_parser.add_argument('--log-file', help='Log file path')
    workflow_parser.add_argument('--timestamp', action='store_true', help='Include timestamps in logs')

    # List command ("list workflows <file>")
    list_parser = subparsers.add_parser('list', help='List available resources')
    list_subparsers = list_parser.add_subparsers(dest='list_command', help='List commands')

    workflows_parser = list_subparsers.add_parser('workflows', help='List workflows in a file')
    workflows_parser.add_argument('workflow', help='Path to workflow YAML file')

    # Version command
    subparsers.add_parser('version', help='Show DACP version')

    # Serve command for REST API
    serve_parser = subparsers.add_parser('serve', help='Start REST API server')
    serve_parser.add_argument('--host', default='0.0.0.0', help='Host to bind to')
    serve_parser.add_argument('--port', type=int, default=8000, help='Port to bind to')
    serve_parser.add_argument('--reload', action='store_true', help='Enable auto-reload')

    args = parser.parse_args()

    # Dispatch; `and` short-circuits so args.run_command / args.list_command
    # are only read when the matching parent command was chosen.
    if args.command == 'run' and args.run_command == 'workflow':
        return run_workflow(args)
    elif args.command == 'list' and args.list_command == 'workflows':
        return list_workflows(args)
    elif args.command == 'version':
        print(f"DACP version {dacp.__version__}")
        return 0
    elif args.command == 'serve':
        try:
            # The API server needs the optional fastapi/uvicorn extras.
            from dacp.api import start_server
            start_server(args.host, args.port, args.reload)
            return 0
        except ImportError:
            print("❌ REST API dependencies not installed. Run: pip install dacp[api]")
            return 1
    else:
        # No (or unknown) command: show usage and fail.
        parser.print_help()
        return 1
|
246
|
+
|
247
|
+
|
248
|
+
if __name__ == '__main__':
    # Exit with the status code returned by main() (0 on success, 1 on error).
    sys.exit(main())
|