dacp 0.3.2__tar.gz → 0.3.3__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dacp
3
- Version: 0.3.2
3
+ Version: 0.3.3
4
4
  Summary: Declarative Agent Communication Protocol - A protocol for managing LLM/agent communications and tool function calls
5
5
  Author-email: Andrew Whitehouse <andrew.whitehouse@example.com>
6
6
  License: MIT
@@ -24,6 +24,28 @@ from .logging_config import (
24
24
  disable_dacp_logging,
25
25
  enable_dacp_logging,
26
26
  )
27
+ from .json_parser import (
28
+ robust_json_parse,
29
+ parse_with_fallback,
30
+ extract_json_from_text,
31
+ create_fallback_response,
32
+ )
33
+ from .workflow import (
34
+ WorkflowOrchestrator,
35
+ TaskBoard,
36
+ Task,
37
+ TaskStatus,
38
+ TaskPriority,
39
+ WorkflowRule,
40
+ )
41
+ from .workflow_runtime import (
42
+ WorkflowRuntime,
43
+ AgentRegistry,
44
+ TaskRegistry,
45
+ TaskExecution,
46
+ RegisteredAgent,
47
+ TaskStatus as RuntimeTaskStatus,
48
+ )
27
49
 
28
50
  __version__ = "0.3.0"
29
51
 
@@ -54,4 +76,23 @@ __all__ = [
54
76
  "set_dacp_log_level",
55
77
  "disable_dacp_logging",
56
78
  "enable_dacp_logging",
79
+ # JSON parsing utilities
80
+ "robust_json_parse",
81
+ "parse_with_fallback",
82
+ "extract_json_from_text",
83
+ "create_fallback_response",
84
+ # Workflow management
85
+ "WorkflowOrchestrator",
86
+ "TaskBoard",
87
+ "Task",
88
+ "TaskStatus",
89
+ "TaskPriority",
90
+ "WorkflowRule",
91
+ # Workflow runtime
92
+ "WorkflowRuntime",
93
+ "AgentRegistry",
94
+ "TaskRegistry",
95
+ "TaskExecution",
96
+ "RegisteredAgent",
97
+ "RuntimeTaskStatus",
57
98
  ]
@@ -0,0 +1,232 @@
1
+ """
2
+ DACP JSON Parser - Robust JSON parsing for agent responses.
3
+
4
+ This module provides enhanced JSON parsing capabilities that can handle
5
+ various LLM response formats and provide intelligent fallbacks.
6
+ """
7
+
8
+ import json
9
+ import re
10
+ import logging
11
+ from typing import Dict, Any, Optional, Union
12
+ from pydantic import BaseModel
13
+
14
+ logger = logging.getLogger("dacp.json_parser")
15
+
16
+
17
def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
    """Extract a JSON object from free-form text using several strategies.

    Strategies, in order:
      1. Parse the whole (stripped) text as JSON.
      2. Parse the substring between the first '{' and the last '}'.
      3. Parse JSON found inside fenced code blocks (``` or ```json).
      4. Strip a known prefix ("response:", "output:", ...) and recurse.

    Args:
        text: Raw text that might contain a JSON object.

    Returns:
        The parsed JSON object as a dict, or None if no JSON *object* could
        be extracted. Non-object JSON (arrays, numbers, bare strings) is
        deliberately rejected so the return type matches the annotation.
    """
    # getLogger is cached, so this is the same logger as the module-level one.
    logger = logging.getLogger("dacp.json_parser")

    if not isinstance(text, str):
        return None

    logger.debug(f"🔍 Attempting to extract JSON from text: {text[:100]}...")

    # Strategy 1: the entire text is JSON.
    try:
        result = json.loads(text.strip())
        # Guard: only dicts may be returned (fixes e.g. "42" or "[1,2]"
        # leaking a non-dict through the Optional[Dict] contract).
        if isinstance(result, dict):
            logger.debug("✅ Successfully parsed entire text as JSON")
            return result
        logger.debug("❌ Parsed JSON is not an object; ignoring")
    except json.JSONDecodeError:
        logger.debug("❌ Failed to parse entire text as JSON")

    # Strategy 2: substring between the outermost braces.
    json_start = text.find('{')
    json_end = text.rfind('}') + 1
    if json_start >= 0 and json_end > json_start:
        try:
            result = json.loads(text[json_start:json_end])
            if isinstance(result, dict):
                logger.debug("✅ Successfully extracted JSON between braces")
                return result
        except json.JSONDecodeError:
            logger.debug("❌ Failed to parse JSON between braces")

    # Strategy 3: fenced code blocks (optionally tagged "json").
    code_block_pattern = r'```(?:json)?\s*(\{.*?\})\s*```'
    for match in re.findall(code_block_pattern, text, re.DOTALL):
        try:
            result = json.loads(match)
            if isinstance(result, dict):
                logger.debug("✅ Successfully extracted JSON from code block")
                return result
        except json.JSONDecodeError:
            continue

    # Strategy 4: known prose prefixes followed by JSON.
    prefixes = [
        "json response:",
        "response:",
        "output:",
        "result:",
        "here is the json:",
        "the json is:",
    ]
    for prefix in prefixes:
        prefix_pos = text.lower().find(prefix.lower())
        if prefix_pos >= 0:
            remaining_text = text[prefix_pos + len(prefix):].strip()
            extracted = extract_json_from_text(remaining_text)
            # `is not None` so a legitimately-extracted empty dict is kept.
            if extracted is not None:
                logger.debug(f"✅ Successfully extracted JSON after prefix: {prefix}")
                return extracted

    logger.debug("❌ No valid JSON found in text")
    return None
84
+
85
+
86
def create_fallback_response(
    text: str,
    required_fields: Dict[str, Any],
    optional_fields: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """Build a best-effort response dict when JSON parsing fails.

    Heuristics per required field:
      * message-like fields ("message", "response_message",
        "greeting_message") receive the raw (stripped) text;
      * agent-like fields ("agent", "sender_agent", "target_agent") are
        filled from an "agent: <name>" pattern found in the text, else the
        provided default, else "unknown";
      * every other field gets its provided default unchanged.

    Args:
        text: Original LLM response text.
        required_fields: Required field names mapped to default values.
        optional_fields: Optional field names mapped to default values.
            (Annotation fixed: the default is None, so the type must be
            Optional.)

    Returns:
        Dictionary containing every required (and optional) field.
    """
    logger = logging.getLogger("dacp.json_parser")
    logger.info(f"🔄 Creating fallback response for text: {text[:50]}...")

    fallback = {}

    # Fill required fields with defaults or extracted content.
    for field_name, default_value in required_fields.items():
        if field_name in ["message", "response_message", "greeting_message"]:
            # The raw text is the best stand-in for a message field.
            fallback[field_name] = text.strip()
            logger.debug(f"📝 Using text as {field_name}")
        elif field_name in ["agent", "sender_agent", "target_agent"]:
            # Try to extract an agent name from the text, else use default.
            agent_match = re.search(r'agent[:\s]+([a-zA-Z0-9_-]+)', text, re.IGNORECASE)
            if agent_match:
                fallback[field_name] = agent_match.group(1)
                logger.debug(f"🎯 Extracted agent name: {agent_match.group(1)}")
            else:
                fallback[field_name] = default_value or "unknown"
                logger.debug(f"🔧 Using default for {field_name}: {fallback[field_name]}")
        else:
            fallback[field_name] = default_value
            logger.debug(f"⚙️ Setting {field_name} to default: {default_value}")

    # Optional fields are copied through verbatim.
    if optional_fields:
        for field_name, default_value in optional_fields.items():
            fallback[field_name] = default_value
            logger.debug(f"📋 Adding optional field {field_name}: {default_value}")

    logger.info(f"✅ Created fallback response with {len(fallback)} fields")
    return fallback
133
+
134
+
135
def robust_json_parse(
    response: Union[str, dict, BaseModel],
    target_model: type,
    required_fields: Dict[str, Any],
    optional_fields: Optional[Dict[str, Any]] = None
) -> BaseModel:
    """Robust JSON parsing with intelligent fallbacks.

    Coerces an LLM response into *target_model*:
      1. An instance of *target_model* is returned unchanged.
      2. A dict is passed to the model constructor directly.
      3. A string is scanned for JSON (extract_json_from_text); if that
         fails, a payload is synthesized from the raw text via
         create_fallback_response.

    Args:
        response: LLM response (string, dict, or Pydantic model).
        target_model: Pydantic model class to create.
        required_fields: Required field names mapped to default values.
        optional_fields: Optional field names mapped to default values.
            (Annotation fixed: the default is None, so the type must be
            Optional.)

    Returns:
        Instance of target_model.

    Raises:
        ValueError: If no strategy produces a valid model instance.
    """
    logger = logging.getLogger("dacp.json_parser")
    logger.debug(f"🔧 Parsing response of type {type(response).__name__} into {target_model.__name__}")

    # Case 1: already the target model — nothing to do.
    if isinstance(response, target_model):
        logger.debug("✅ Response is already target model")
        return response

    # Case 2: dict — construct directly; a validation failure falls
    # through to the final ValueError below (original behavior).
    if isinstance(response, dict):
        try:
            result = target_model(**response)
            logger.debug("✅ Successfully created model from dict")
            return result
        except Exception as e:
            logger.debug(f"❌ Failed to create model from dict: {e}")

    # Case 3: string — extract JSON, else synthesize a fallback payload.
    if isinstance(response, str):
        extracted_json = extract_json_from_text(response)

        if extracted_json:
            try:
                result = target_model(**extracted_json)
                logger.debug("✅ Successfully created model from extracted JSON")
                return result
            except Exception as e:
                logger.debug(f"❌ Failed to create model from extracted JSON: {e}")

        logger.info("🔄 Creating fallback response for string input")
        fallback_data = create_fallback_response(
            response,
            required_fields,
            optional_fields
        )

        try:
            result = target_model(**fallback_data)
            logger.info("✅ Successfully created model from fallback data")
            return result
        except Exception as e:
            logger.error(f"❌ Failed to create fallback response: {e}")
            # Chain the original validation error for diagnosability.
            raise ValueError(f"Failed to create fallback response: {e}") from e

    # Anything else (or a dict that failed validation) is unparseable.
    error_msg = f"Unable to parse response of type {type(response)}: {response}"
    logger.error(f"❌ {error_msg}")
    raise ValueError(error_msg)
204
+
205
+
206
def parse_with_fallback(response: Any, model_class: type, **field_defaults) -> BaseModel:
    """Parse *response* into *model_class*, auto-detecting field defaults.

    Inspects the Pydantic model's fields (v2 `model_fields`) to split them
    into required and optional sets, then delegates to robust_json_parse.

    Args:
        response: LLM response to parse.
        model_class: Pydantic model class.
        **field_defaults: Default values for fields (field_name=default_value).

    Returns:
        Instance of model_class.
    """
    required_fields: Dict[str, Any] = {}
    optional_fields: Dict[str, Any] = {}

    # Split the model's declared fields into required vs. optional.
    if hasattr(model_class, 'model_fields'):
        for name, info in model_class.model_fields.items():
            if info.is_required():
                # Required fields fall back to the caller-supplied default,
                # or an empty string when none was given.
                required_fields[name] = field_defaults.get(name, "")
            else:
                # Optional fields keep the model's own declared default.
                optional_fields[name] = info.default

    return robust_json_parse(response, model_class, required_fields, optional_fields)
@@ -120,6 +120,17 @@ class Orchestrator:
120
120
  # Call the agent's message handler
121
121
  response = agent.handle_message(message)
122
122
 
123
+ # Handle Pydantic models by converting to dict
124
+ if hasattr(response, 'model_dump'):
125
+ logger.debug(f"📊 Converting Pydantic model to dict: {type(response).__name__}")
126
+ response = response.model_dump()
127
+ elif not isinstance(response, dict):
128
+ logger.debug(f"📊 Converting response to dict: {type(response)}")
129
+ if hasattr(response, '__dict__'):
130
+ response = response.__dict__
131
+ else:
132
+ response = {"result": str(response)}
133
+
123
134
  duration = time.time() - start_time
124
135
  logger.info(f"✅ Agent '{agent_name}' responded in {duration:.3f}s")
125
136
  logger.debug(f"📤 Agent response: {response}")
@@ -229,6 +240,7 @@ class Orchestrator:
229
240
  entry = {
230
241
  "timestamp": time.time(),
231
242
  "session_id": self.session_id,
243
+ "agent": agent_name,
232
244
  "agent_name": agent_name,
233
245
  "message": message,
234
246
  "response": response,
@@ -0,0 +1,409 @@
1
+ """
2
+ DACP Workflow Management - Agent-to-agent communication and task routing.
3
+
4
+ This module provides workflow orchestration capabilities for multi-agent systems,
5
+ including task boards, message routing, and automated agent collaboration.
6
+ """
7
+
8
+ import logging
9
+ import time
10
+ import uuid
11
+ from typing import Dict, Any, List, Optional, Callable
12
+ from dataclasses import dataclass, field
13
+ from enum import Enum
14
+
15
+ logger = logging.getLogger("dacp.workflow")
16
+
17
+
18
class TaskStatus(Enum):
    """Task status enumeration (lifecycle of a Task on the TaskBoard)."""
    PENDING = "pending"            # created, not yet queued for an agent
    ASSIGNED = "assigned"          # queued for a target agent (set by TaskBoard.add_task)
    IN_PROGRESS = "in_progress"    # handed out via TaskBoard.get_next_task
    COMPLETED = "completed"        # set by TaskBoard.complete_task
    FAILED = "failed"              # set by TaskBoard.fail_task
    CANCELLED = "cancelled"        # not set anywhere in this module — reserved for callers
26
+
27
+
28
class TaskPriority(Enum):
    """Task priority enumeration; higher value sorts first in get_next_task."""
    LOW = 1
    NORMAL = 2
    HIGH = 3
    URGENT = 4
34
+
35
+
36
@dataclass
class Task:
    """Represents a task in the workflow system.

    A unit of work exchanged between agents via the TaskBoard; mutated in
    place by the board as it moves through its lifecycle.
    """
    id: str                                 # unique id (uuid4 string, assigned by TaskBoard)
    type: str                               # task type, matched against WorkflowRule.source_task_type
    data: Dict[str, Any]                    # payload forwarded to the handling agent
    source_agent: str                       # agent that created the task
    target_agent: Optional[str] = None      # agent the task is queued for, if any
    status: TaskStatus = TaskStatus.PENDING
    priority: TaskPriority = TaskPriority.NORMAL
    created_at: float = field(default_factory=time.time)
    assigned_at: Optional[float] = None     # set when queued for an agent
    completed_at: Optional[float] = None    # set on completion or failure
    result: Optional[Dict[str, Any]] = None # filled by TaskBoard.complete_task
    error: Optional[str] = None             # filled by TaskBoard.fail_task
    dependencies: List[str] = field(default_factory=list)  # task ids that must complete first
    metadata: Dict[str, Any] = field(default_factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Convert task to dictionary representation (enums flattened to values)."""
        return {
            "id": self.id,
            "type": self.type,
            "data": self.data,
            "source_agent": self.source_agent,
            "target_agent": self.target_agent,
            "status": self.status.value,
            "priority": self.priority.value,
            "created_at": self.created_at,
            "assigned_at": self.assigned_at,
            "completed_at": self.completed_at,
            "result": self.result,
            "error": self.error,
            "dependencies": self.dependencies,
            "metadata": self.metadata,
        }
72
+
73
+
74
@dataclass
class WorkflowRule:
    """Defines routing rules for agent-to-agent communication.

    When a task of *source_task_type* completes, the TaskBoard creates a
    follow-up task of *target_task_type* for *target_agent* (see
    TaskBoard._trigger_workflow_rules).
    """
    source_task_type: str
    target_agent: str
    target_task_type: str
    # Optional predicate on the completed Task; rule is skipped when it returns False.
    condition: Optional[Callable[[Task], bool]] = None
    # Optional mapping from the completed task's result dict to the new task's input data.
    transform_data: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None
    priority: TaskPriority = TaskPriority.NORMAL
83
+
84
+
85
class TaskBoard:
    """Central task board for managing agent-to-agent tasks.

    Tracks every Task by id, keeps a per-agent queue of assigned task ids,
    and fires WorkflowRule-driven follow-up tasks when a task completes.
    """

    def __init__(self):
        self.tasks: Dict[str, Task] = {}              # task_id -> Task
        self.agent_queues: Dict[str, List[str]] = {}  # agent name -> queued task ids
        self.completed_tasks: List[str] = []          # ids in completion order
        self.workflow_rules: List[WorkflowRule] = []

    def add_task(
        self,
        task_type: str,
        data: Dict[str, Any],
        source_agent: str,
        target_agent: Optional[str] = None,
        priority: TaskPriority = TaskPriority.NORMAL,
        dependencies: Optional[List[str]] = None,
    ) -> str:
        """Add a new task to the board and return its generated id.

        If *target_agent* is given the task is queued for that agent and
        immediately marked ASSIGNED; otherwise it stays PENDING.
        """
        task_id = str(uuid.uuid4())

        task = Task(
            id=task_id,
            type=task_type,
            data=data,
            source_agent=source_agent,
            target_agent=target_agent,
            priority=priority,
            dependencies=dependencies or [],
        )

        self.tasks[task_id] = task

        # Queue the task for the target agent, if one was specified.
        if target_agent:
            self.agent_queues.setdefault(target_agent, []).append(task_id)
            task.status = TaskStatus.ASSIGNED
            task.assigned_at = time.time()

        logger.info(f"📋 Task '{task_id}' added: {task_type} from {source_agent} to {target_agent}")
        return task_id

    def get_next_task(self, agent_name: str) -> Optional[Task]:
        """Return the highest-priority runnable task for *agent_name*.

        Returns None when the agent has no queued task whose dependencies
        are all satisfied. The returned task is marked IN_PROGRESS; it is
        removed from the queue only on completion/failure.
        """
        queue = self.agent_queues.get(agent_name)
        if not queue:
            return None

        # Runnable = still ASSIGNED and every dependency completed.
        available_tasks = [
            self.tasks[task_id]
            for task_id in queue
            if self.tasks[task_id].status == TaskStatus.ASSIGNED
            and self._dependencies_satisfied(self.tasks[task_id])
        ]
        if not available_tasks:
            return None

        # Higher priority first; ties broken by age (older first).
        available_tasks.sort(key=lambda t: (-t.priority.value, t.created_at))

        next_task = available_tasks[0]
        next_task.status = TaskStatus.IN_PROGRESS

        logger.info(f"📤 Task '{next_task.id}' assigned to agent '{agent_name}'")
        return next_task

    def complete_task(
        self,
        task_id: str,
        result: Dict[str, Any],
        trigger_rules: bool = True
    ) -> None:
        """Mark a task as completed, record its result, and (optionally)
        trigger any workflow rules that chain off it."""
        task = self.tasks.get(task_id)
        if task is None:
            logger.error(f"❌ Task '{task_id}' not found")
            return

        task.status = TaskStatus.COMPLETED
        task.completed_at = time.time()
        task.result = result

        self._remove_from_queue(task)
        self.completed_tasks.append(task_id)

        logger.info(f"✅ Task '{task_id}' completed by agent '{task.target_agent}'")

        if trigger_rules:
            self._trigger_workflow_rules(task)

    def fail_task(self, task_id: str, error: str) -> None:
        """Mark a task as failed and record the error message."""
        task = self.tasks.get(task_id)
        if task is None:
            logger.error(f"❌ Task '{task_id}' not found")
            return

        task.status = TaskStatus.FAILED
        task.completed_at = time.time()
        task.error = error

        self._remove_from_queue(task)

        logger.error(f"❌ Task '{task_id}' failed: {error}")

    def _remove_from_queue(self, task: Task) -> None:
        """Drop a finished task from its target agent's queue, if queued.

        Shared by complete_task/fail_task (previously duplicated inline).
        """
        if not task.target_agent:
            return
        queue = self.agent_queues.get(task.target_agent)
        if queue and task.id in queue:
            queue.remove(task.id)

    def add_workflow_rule(self, rule: WorkflowRule) -> None:
        """Add a workflow rule for automatic task routing."""
        self.workflow_rules.append(rule)
        logger.info(
            f"🔄 Workflow rule added: {rule.source_task_type} → "
            f"{rule.target_agent} ({rule.target_task_type})"
        )

    def _dependencies_satisfied(self, task: Task) -> bool:
        """True when every dependency id exists and is COMPLETED."""
        for dep_id in task.dependencies:
            dep_task = self.tasks.get(dep_id)
            if dep_task is None or dep_task.status != TaskStatus.COMPLETED:
                return False
        return True

    def _trigger_workflow_rules(self, completed_task: Task) -> None:
        """Create follow-up tasks for every rule matching *completed_task*."""
        for rule in self.workflow_rules:
            if rule.source_task_type != completed_task.type:
                continue
            # Optional predicate can veto the rule.
            if rule.condition and not rule.condition(completed_task):
                continue

            # Derive the new task's payload from the completed result.
            if rule.transform_data and completed_task.result:
                new_data = rule.transform_data(completed_task.result)
            else:
                new_data = completed_task.result or {}

            new_task_id = self.add_task(
                task_type=rule.target_task_type,
                data=new_data,
                source_agent=completed_task.target_agent or completed_task.source_agent,
                target_agent=rule.target_agent,
                priority=rule.priority,
            )

            logger.info(
                f"🔄 Workflow rule triggered: {completed_task.id} → {new_task_id}"
            )

    def get_task_status(self, task_id: str) -> Optional[Dict[str, Any]]:
        """Return a task's dict representation, or None if unknown."""
        task = self.tasks.get(task_id)
        return task.to_dict() if task is not None else None

    def get_agent_queue_status(self, agent_name: str) -> Dict[str, Any]:
        """Summarize the queue for *agent_name* (length plus per-task info)."""
        if agent_name not in self.agent_queues:
            return {"agent": agent_name, "queue_length": 0, "tasks": []}

        queue = self.agent_queues[agent_name]
        task_details = []
        for task_id in queue:
            task = self.tasks.get(task_id)
            if task is None:
                continue
            task_details.append({
                "id": task_id,
                "type": task.type,
                "status": task.status.value,
                "priority": task.priority.value,
                "created_at": task.created_at,
            })

        return {
            "agent": agent_name,
            "queue_length": len(queue),
            "tasks": task_details,
        }

    def get_workflow_summary(self) -> Dict[str, Any]:
        """Aggregate counts across all tasks, queues, and rules."""
        status_counts: Dict[str, int] = {}
        for task in self.tasks.values():
            status = task.status.value
            status_counts[status] = status_counts.get(status, 0) + 1

        return {
            "total_tasks": len(self.tasks),
            "status_counts": status_counts,
            "agent_queues": {
                agent: len(queue) for agent, queue in self.agent_queues.items()
            },
            "completed_tasks": len(self.completed_tasks),
            "workflow_rules": len(self.workflow_rules),
        }
294
+
295
+
296
class WorkflowOrchestrator:
    """Enhanced orchestrator with workflow and agent-to-agent communication.

    Wraps a base orchestrator and adds a TaskBoard so registered agents can
    hand tasks to one another, plus rule-based task chaining.
    """

    def __init__(self, orchestrator):
        """Initialize with a base orchestrator."""
        self.orchestrator = orchestrator
        self.task_board = TaskBoard()
        self.auto_processing = False
        self._processing_interval = 1.0  # seconds

    def enable_auto_processing(self, interval: float = 1.0) -> None:
        """Enable automatic task processing.

        NOTE(review): this only records the flag/interval — no background
        loop is started in this module; confirm intended usage upstream.
        """
        self.auto_processing = True
        self._processing_interval = interval
        logger.info(f"🤖 Auto-processing enabled (interval: {interval}s)")

    def disable_auto_processing(self) -> None:
        """Disable automatic task processing."""
        self.auto_processing = False
        logger.info("⏸️ Auto-processing disabled")

    def submit_task_for_agent(
        self,
        source_agent: str,
        target_agent: str,
        task_type: str,
        task_data: Dict[str, Any],
        priority: TaskPriority = TaskPriority.NORMAL,
    ) -> str:
        """Submit a task from one agent to another; returns the task id."""
        return self.task_board.add_task(
            task_type=task_type,
            data=task_data,
            source_agent=source_agent,
            target_agent=target_agent,
            priority=priority,
        )

    def process_agent_tasks(self, agent_name: str, max_tasks: int = 1) -> List[Dict[str, Any]]:
        """Process up to *max_tasks* queued tasks for *agent_name*.

        Each task is forwarded to the agent via the base orchestrator; its
        reply marks the task completed (or failed, when the reply carries
        an "error" key or message delivery raises).
        """
        if agent_name not in self.orchestrator.agents:
            logger.error(f"❌ Agent '{agent_name}' not registered")
            return []

        outcomes: List[Dict[str, Any]] = []

        for _ in range(max_tasks):
            task = self.task_board.get_next_task(agent_name)
            if task is None:
                break

            try:
                # Agent message = the task type plus the raw task payload.
                message = {
                    "task": task.type,
                    **task.data,
                }

                reply = self.orchestrator.send_message(agent_name, message)

                if "error" in reply:
                    self.task_board.fail_task(task.id, reply["error"])
                    outcomes.append({"task_id": task.id, "status": "failed", "error": reply["error"]})
                else:
                    self.task_board.complete_task(task.id, reply)
                    outcomes.append({"task_id": task.id, "status": "completed", "result": reply})

            except Exception as exc:
                failure = f"Task processing failed: {exc}"
                self.task_board.fail_task(task.id, failure)
                outcomes.append({"task_id": task.id, "status": "failed", "error": failure})

        return outcomes

    def add_workflow_rule(
        self,
        source_task_type: str,
        target_agent: str,
        target_task_type: str,
        condition: Optional[Callable[[Task], bool]] = None,
        transform_data: Optional[Callable[[Dict[str, Any]], Dict[str, Any]]] = None,
        priority: TaskPriority = TaskPriority.NORMAL,
    ) -> None:
        """Register a rule that chains a follow-up task whenever a task of
        *source_task_type* completes."""
        self.task_board.add_workflow_rule(WorkflowRule(
            source_task_type=source_task_type,
            target_agent=target_agent,
            target_task_type=target_task_type,
            condition=condition,
            transform_data=transform_data,
            priority=priority,
        ))

    def get_workflow_status(self) -> Dict[str, Any]:
        """Return a snapshot of orchestrator, task board, and agent queues."""
        agent_names = list(self.orchestrator.agents.keys())
        return {
            "orchestrator": {
                "session_id": self.orchestrator.session_id,
                "registered_agents": agent_names,
                "auto_processing": self.auto_processing,
            },
            "task_board": self.task_board.get_workflow_summary(),
            "agent_queues": {
                name: self.task_board.get_agent_queue_status(name)
                for name in agent_names
            },
        }
@@ -0,0 +1,508 @@
1
+ """
2
+ DACP Workflow Runtime - Declarative workflow execution from workflow.yaml
3
+
4
+ This module provides a runtime system that reads workflow.yaml files and
5
+ orchestrates agent collaboration through agent and task registries.
6
+ """
7
+
8
+ import logging
9
+ import time
10
+ import uuid
11
+ import yaml
12
+ import json
13
+ from typing import Dict, Any, List, Optional, Union
14
+ from pathlib import Path
15
+ from dataclasses import dataclass, field
16
+ from enum import Enum
17
+
18
+ logger = logging.getLogger("dacp.workflow_runtime")
19
+
20
+
21
class TaskStatus(Enum):
    """Task execution status (runtime variant; note RUNNING here vs. the
    workflow module's ASSIGNED/IN_PROGRESS)."""
    PENDING = "pending"
    RUNNING = "running"
    COMPLETED = "completed"    # terminal; triggers duration bookkeeping in TaskRegistry
    FAILED = "failed"          # terminal; triggers duration bookkeeping in TaskRegistry
    CANCELLED = "cancelled"
28
+
29
+
30
@dataclass
class TaskExecution:
    """Represents a task execution instance (one step of one workflow run)."""
    id: str                        # unique id (uuid4 string, assigned by TaskRegistry)
    workflow_id: str               # id of the workflow run this execution belongs to
    step_id: str                   # step identifier within the workflow definition
    agent_id: str                  # registry id of the agent executing the task
    task_name: str
    input_data: Dict[str, Any]
    status: TaskStatus = TaskStatus.PENDING
    output_data: Optional[Dict[str, Any]] = None
    error: Optional[str] = None
    created_at: float = field(default_factory=time.time)
    started_at: Optional[float] = None     # set by callers via update_task_status kwargs
    completed_at: Optional[float] = None   # stamped by TaskRegistry on terminal status
    duration: Optional[float] = None       # completed_at - started_at, computed by TaskRegistry

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation (status flattened to its value)."""
        return {
            "id": self.id,
            "workflow_id": self.workflow_id,
            "step_id": self.step_id,
            "agent_id": self.agent_id,
            "task_name": self.task_name,
            "input_data": self.input_data,
            "status": self.status.value,
            "output_data": self.output_data,
            "error": self.error,
            "created_at": self.created_at,
            "started_at": self.started_at,
            "completed_at": self.completed_at,
            "duration": self.duration,
        }
64
+
65
+
66
@dataclass
class RegisteredAgent:
    """Represents a registered agent in the registry."""
    id: str
    agent_instance: Any                 # the live agent object itself
    spec_file: Optional[str] = None     # spec path from workflow config, when known
    metadata: Dict[str, Any] = field(default_factory=dict)
    registered_at: float = field(default_factory=time.time)
    last_activity: Optional[float] = None  # refreshed by AgentRegistry.get_agent

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary representation (agent reduced to its class name)."""
        return {
            "id": self.id,
            "agent_type": type(self.agent_instance).__name__,
            "spec_file": self.spec_file,
            "metadata": self.metadata,
            "registered_at": self.registered_at,
            "last_activity": self.last_activity,
        }
86
+
87
+
88
class AgentRegistry:
    """Registry that tracks live agent instances by id."""

    def __init__(self):
        self.agents: Dict[str, RegisteredAgent] = {}

    def register_agent(
        self,
        agent_id: str,
        agent_instance: Any,
        spec_file: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None
    ) -> None:
        """Register (or silently replace) an agent instance under *agent_id*."""
        self.agents[agent_id] = RegisteredAgent(
            id=agent_id,
            agent_instance=agent_instance,
            spec_file=spec_file,
            metadata=metadata or {}
        )
        logger.info(f"🤖 Agent '{agent_id}' registered in registry")

    def get_agent(self, agent_id: str) -> Optional[Any]:
        """Return the agent instance for *agent_id* (refreshing its
        last-activity timestamp), or None if unknown."""
        entry = self.agents.get(agent_id)
        if entry is None:
            return None
        entry.last_activity = time.time()
        return entry.agent_instance

    def list_agents(self) -> List[str]:
        """Return all registered agent IDs."""
        return list(self.agents)

    def get_agent_info(self, agent_id: str) -> Optional[Dict[str, Any]]:
        """Return registration info for *agent_id*, or None if unknown."""
        entry = self.agents.get(agent_id)
        return entry.to_dict() if entry is not None else None

    def unregister_agent(self, agent_id: str) -> bool:
        """Remove *agent_id* from the registry; True if it was present."""
        if self.agents.pop(agent_id, None) is None:
            return False
        logger.info(f"🗑️ Agent '{agent_id}' unregistered from registry")
        return True
136
+
137
+
138
class TaskRegistry:
    """Registry for managing task executions, indexed by workflow id."""

    def __init__(self):
        self.tasks: Dict[str, TaskExecution] = {}
        self.workflow_tasks: Dict[str, List[str]] = {}  # workflow_id -> task_ids

    def create_task(
        self,
        workflow_id: str,
        step_id: str,
        agent_id: str,
        task_name: str,
        input_data: Dict[str, Any]
    ) -> str:
        """Create a new task execution and return its generated id."""
        task_id = str(uuid.uuid4())

        self.tasks[task_id] = TaskExecution(
            id=task_id,
            workflow_id=workflow_id,
            step_id=step_id,
            agent_id=agent_id,
            task_name=task_name,
            input_data=input_data
        )

        # Index the task under its workflow.
        self.workflow_tasks.setdefault(workflow_id, []).append(task_id)

        logger.info(f"📋 Task '{task_id}' created for agent '{agent_id}' in workflow '{workflow_id}'")
        return task_id

    def get_task(self, task_id: str) -> Optional[TaskExecution]:
        """Get a task by ID, or None if unknown."""
        return self.tasks.get(task_id)

    def update_task_status(self, task_id: str, status: TaskStatus, **kwargs) -> bool:
        """Update a task's status plus any other TaskExecution fields.

        Extra keyword arguments (e.g. output_data, error, started_at) are
        applied to matching attributes of the task.

        Fix: a terminal status (COMPLETED/FAILED) now always records
        completed_at — previously a task that failed before started_at was
        set never received a completion timestamp. duration is still only
        computed when started_at is known, and a caller-supplied
        completed_at kwarg is respected rather than overwritten.

        Returns:
            True if the task exists and was updated, False otherwise.
        """
        task = self.tasks.get(task_id)
        if task is None:
            return False

        task.status = status

        # Apply caller-supplied field overrides.
        for key, value in kwargs.items():
            if hasattr(task, key):
                setattr(task, key, value)

        if status in (TaskStatus.COMPLETED, TaskStatus.FAILED):
            if task.completed_at is None:
                task.completed_at = time.time()
            if task.started_at is not None:
                task.duration = task.completed_at - task.started_at

        logger.info(f"📊 Task '{task_id}' status updated to {status.value}")
        return True

    def get_workflow_tasks(self, workflow_id: str) -> List[TaskExecution]:
        """Get all tasks recorded for a workflow (unknown ids skipped)."""
        task_ids = self.workflow_tasks.get(workflow_id, [])
        return [self.tasks[tid] for tid in task_ids if tid in self.tasks]

    def get_task_summary(self) -> Dict[str, Any]:
        """Aggregate task counts by status, plus the workflow count."""
        status_counts: Dict[str, int] = {}
        for task in self.tasks.values():
            status = task.status.value
            status_counts[status] = status_counts.get(status, 0) + 1

        return {
            "total_tasks": len(self.tasks),
            "status_counts": status_counts,
            "workflows": len(self.workflow_tasks)
        }
217
+
218
+
219
+ class WorkflowRuntime:
220
+ """DACP Workflow Runtime - Executes workflows from workflow.yaml"""
221
+
222
+ def __init__(self, orchestrator=None):
223
+ self.orchestrator = orchestrator
224
+ self.agent_registry = AgentRegistry()
225
+ self.task_registry = TaskRegistry()
226
+ self.workflow_config = {}
227
+ self.active_workflows: Dict[str, Dict[str, Any]] = {}
228
+
229
+ def load_workflow_config(self, config_path: str) -> None:
230
+ """Load workflow configuration from YAML file."""
231
+ config_file = Path(config_path)
232
+ if not config_file.exists():
233
+ raise FileNotFoundError(f"Workflow config file not found: {config_path}")
234
+
235
+ with open(config_file, 'r') as f:
236
+ self.workflow_config = yaml.safe_load(f)
237
+
238
+ logger.info(f"📁 Loaded workflow config from {config_path}")
239
+ logger.info(f"📋 Found {len(self.workflow_config.get('workflows', {}))} workflows")
240
+
241
def register_agent_from_config(self, agent_id: str, agent_instance: Any) -> None:
    """Register an agent instance based on workflow config.

    Looks up the agent's spec file in the loaded workflow configuration
    (matched on the ``id`` field) and registers the instance with the
    agent registry.  Agents absent from the config are still registered,
    just without a spec file.
    """
    # Find the agent's spec in the config.  Use .get('id') so a malformed
    # config entry without an 'id' key is skipped instead of raising
    # KeyError while scanning for an unrelated agent.
    agent_spec = None
    for agent_config in self.workflow_config.get('agents', []):
        if agent_config.get('id') == agent_id:
            agent_spec = agent_config.get('spec')
            break

    self.agent_registry.register_agent(
        agent_id=agent_id,
        agent_instance=agent_instance,
        spec_file=agent_spec,
        metadata={"config_based": True},
    )
256
+
257
def execute_workflow(self, workflow_name: str, initial_input: Optional[Dict[str, Any]] = None) -> str:
    """Start executing the named workflow and return its new workflow id.

    Args:
        workflow_name: Key under ``workflows`` in the loaded config.
        initial_input: Optional seed context for the first step.

    Raises:
        ValueError: If the workflow is not present in the config.
    """
    if workflow_name not in self.workflow_config.get('workflows', {}):
        raise ValueError(f"Workflow '{workflow_name}' not found in config")

    workflow_def = self.workflow_config['workflows'][workflow_name]
    workflow_id = str(uuid.uuid4())

    logger.info(f"🚀 Starting workflow '{workflow_name}' with ID '{workflow_id}'")

    # Initialize workflow state before the first step runs so that step
    # execution (and status queries) can see it.
    self.active_workflows[workflow_id] = {
        "name": workflow_name,
        "definition": workflow_def,
        "current_step": 0,
        "context": initial_input or {},
        "started_at": time.time(),
    }

    # Kick off the first step synchronously.
    self._execute_workflow_step(workflow_id, 0)

    return workflow_id
280
+
281
def _execute_workflow_step(self, workflow_id: str, step_index: int) -> None:
    """Execute a specific workflow step.

    Resolves the step's templated input against the workflow context,
    creates a task for the step's agent, and runs it.  A step index past
    the end of the step list marks the workflow as complete.
    """
    if workflow_id not in self.active_workflows:
        logger.error(f"❌ Workflow '{workflow_id}' not found")
        return

    workflow_state = self.active_workflows[workflow_id]
    workflow_def = workflow_state["definition"]
    steps = workflow_def.get("steps", [])

    if step_index >= len(steps):
        logger.info(f"🏁 Workflow '{workflow_id}' completed")
        return

    # Bug fix: record progress so get_workflow_status() reports the step
    # actually being executed instead of a stale 0.
    workflow_state["current_step"] = step_index

    step = steps[step_index]
    step_id = f"step_{step_index}"

    # Extract step configuration
    agent_id = step.get("agent")
    task_name = step.get("task")
    step_input = step.get("input", {})

    # Resolve templated input ({{var.path}}) against the workflow context.
    resolved_input = self._resolve_input_data(step_input, workflow_state["context"])

    logger.info(f"📋 Executing step {step_index}: {agent_id}.{task_name}")

    # Create the task record, then run it.
    task_id = self.task_registry.create_task(
        workflow_id=workflow_id,
        step_id=step_id,
        agent_id=agent_id,
        task_name=task_name,
        input_data=resolved_input,
    )

    self._execute_task(task_id, workflow_id, step_index)
319
+
320
def _execute_task(self, task_id: str, workflow_id: str, step_index: int) -> None:
    """Run a single task against its agent and record the outcome.

    Marks the task RUNNING, dispatches it (through the orchestrator when
    one is configured, otherwise directly via the agent's handle_message),
    then marks it COMPLETED or FAILED and, on success, continues the
    workflow via _handle_task_completion.
    """
    record = self.task_registry.get_task(task_id)
    if not record:
        logger.error(f"❌ Task '{task_id}' not found")
        return

    # The agent must have been registered before the workflow runs.
    handler = self.agent_registry.get_agent(record.agent_id)
    if not handler:
        self.task_registry.update_task_status(
            task_id, TaskStatus.FAILED,
            error=f"Agent '{record.agent_id}' not found"
        )
        return

    self.task_registry.update_task_status(
        task_id, TaskStatus.RUNNING,
        started_at=time.time()
    )

    try:
        # The agent message is the task name plus the resolved input fields.
        message = {"task": record.task_name, **record.input_data}

        logger.info(f"📨 Sending task '{record.task_name}' to agent '{record.agent_id}'")

        if self.orchestrator:
            result = self.orchestrator.send_message(record.agent_id, message)
        else:
            result = handler.handle_message(message)

        # Agents may signal failure with an "error" key rather than raising.
        if isinstance(result, dict) and "error" in result:
            self.task_registry.update_task_status(
                task_id, TaskStatus.FAILED,
                error=result["error"]
            )
            logger.error(f"❌ Task '{task_id}' failed: {result['error']}")
            return

        self.task_registry.update_task_status(
            task_id, TaskStatus.COMPLETED,
            output_data=result
        )
        logger.info(f"✅ Task '{task_id}' completed successfully")

        # Route the output onward / advance the workflow.
        self._handle_task_completion(task_id, workflow_id, step_index, result)

    except Exception as e:
        self.task_registry.update_task_status(
            task_id, TaskStatus.FAILED,
            error=str(e)
        )
        logger.error(f"❌ Task '{task_id}' failed with exception: {e}")
383
+
384
def _handle_task_completion(self, task_id: str, workflow_id: str, step_index: int, result: Dict[str, Any]) -> None:
    """Handle task completion: publish the output and route to the next step."""
    state = self.active_workflows[workflow_id]
    steps = state["definition"].get("steps", [])

    if step_index >= len(steps):
        return

    current_step = steps[step_index]

    # Normalize Pydantic results (v2 model_dump, v1 dict) to plain dicts.
    if hasattr(result, 'model_dump'):
        result_dict = result.model_dump()
        logger.debug(f"🔧 Converted Pydantic model to dict: {result_dict}")
    elif hasattr(result, 'dict'):
        result_dict = result.dict()
        logger.debug(f"🔧 Converted Pydantic model to dict (legacy): {result_dict}")
    else:
        result_dict = result

    # Expose the step's output to templating as {{output...}}.
    state["context"].update({"output": result_dict})

    route_config = current_step.get("route_output_to")
    if not route_config:
        # No explicit routing: fall through to the next sequential step.
        self._execute_workflow_step(workflow_id, step_index + 1)
        return

    # Explicit routing: hand the output to another agent/task.
    next_agent_id = route_config.get("agent")
    next_task_name = route_config.get("task")
    input_mapping = route_config.get("input_mapping", {})

    logger.debug(f"🔍 Input mapping: {input_mapping}")
    logger.debug(f"🔍 Available output data: {result_dict}")

    next_input = self._resolve_input_mapping(input_mapping, result_dict, state["context"])

    logger.info(f"🔄 Routing output to {next_agent_id}.{next_task_name}")
    logger.debug(f"🔍 Resolved input for next task: {next_input}")

    next_task_id = self.task_registry.create_task(
        workflow_id=workflow_id,
        step_id=f"routed_step_{step_index}",
        agent_id=next_agent_id,
        task_name=next_task_name,
        input_data=next_input,
    )
    self._execute_task(next_task_id, workflow_id, step_index + 1)
438
+
439
+ def _resolve_input_data(self, input_config: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
440
+ """Resolve input data with context variables."""
441
+ resolved = {}
442
+ for key, value in input_config.items():
443
+ if isinstance(value, str) and value.startswith("{{") and value.endswith("}}"):
444
+ # Template variable
445
+ var_path = value[2:-2].strip()
446
+ resolved[key] = self._get_nested_value(context, var_path)
447
+ else:
448
+ resolved[key] = value
449
+ return resolved
450
+
451
+ def _resolve_input_mapping(self, mapping: Dict[str, str], output: Dict[str, Any], context: Dict[str, Any]) -> Dict[str, Any]:
452
+ """Resolve input mapping with output and context."""
453
+ resolved = {}
454
+ for target_key, source_template in mapping.items():
455
+ if isinstance(source_template, str) and source_template.startswith("{{") and source_template.endswith("}}"):
456
+ var_path = source_template[2:-2].strip()
457
+ if var_path.startswith("output."):
458
+ # From current output
459
+ field_name = var_path[7:] # Remove "output."
460
+ resolved[target_key] = output.get(field_name, "")
461
+ else:
462
+ # From context
463
+ resolved[target_key] = self._get_nested_value(context, var_path)
464
+ else:
465
+ resolved[target_key] = source_template
466
+ return resolved
467
+
468
+ def _get_nested_value(self, data: Dict[str, Any], path: str) -> Any:
469
+ """Get nested value from dictionary using dot notation."""
470
+ keys = path.split('.')
471
+ current = data
472
+ for key in keys:
473
+ if isinstance(current, dict) and key in current:
474
+ current = current[key]
475
+ else:
476
+ return None
477
+ return current
478
+
479
def get_workflow_status(self, workflow_id: str) -> Optional[Dict[str, Any]]:
    """Get workflow execution status, or None for an unknown workflow id."""
    state = self.active_workflows.get(workflow_id)
    if state is None:
        return None

    executions = self.task_registry.get_workflow_tasks(workflow_id)
    return {
        "workflow_id": workflow_id,
        "name": state["name"],
        "current_step": state["current_step"],
        "started_at": state["started_at"],
        "context": state["context"],
        "tasks": [execution.to_dict() for execution in executions],
    }
495
+
496
def get_runtime_status(self) -> Dict[str, Any]:
    """Get a snapshot of overall runtime state: agents, tasks, workflows."""
    registered = self.agent_registry.agents
    agent_details = [entry.to_dict() for entry in registered.values()]
    return {
        "agents": {
            "registered": len(registered),
            "agents": agent_details,
        },
        "tasks": self.task_registry.get_task_summary(),
        "workflows": {
            "active": len(self.active_workflows),
            "configured": len(self.workflow_config.get('workflows', {})),
        },
    }
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: dacp
3
- Version: 0.3.2
3
+ Version: 0.3.3
4
4
  Summary: Declarative Agent Communication Protocol - A protocol for managing LLM/agent communications and tool function calls
5
5
  Author-email: Andrew Whitehouse <andrew.whitehouse@example.com>
6
6
  License: MIT
@@ -4,6 +4,7 @@ pyproject.toml
4
4
  dacp/__init__.py
5
5
  dacp/exceptions.py
6
6
  dacp/intelligence.py
7
+ dacp/json_parser.py
7
8
  dacp/llm.py
8
9
  dacp/logging_config.py
9
10
  dacp/main.py
@@ -11,6 +12,8 @@ dacp/orchestrator.py
11
12
  dacp/protocol.py
12
13
  dacp/tools.py
13
14
  dacp/types.py
15
+ dacp/workflow.py
16
+ dacp/workflow_runtime.py
14
17
  dacp.egg-info/PKG-INFO
15
18
  dacp.egg-info/SOURCES.txt
16
19
  dacp.egg-info/dependency_links.txt
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "dacp"
7
- version = "0.3.2"
7
+ version = "0.3.3"
8
8
  description = "Declarative Agent Communication Protocol - A protocol for managing LLM/agent communications and tool function calls"
9
9
  readme = "README.md"
10
10
  license = {text = "MIT"}
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes