dacp 0.3.3__py3-none-any.whl → 0.3.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dacp/__init__.py +1 -1
- dacp/api.py +365 -0
- dacp/cli.py +249 -0
- dacp/intelligence.py +10 -28
- dacp/json_parser.py +47 -49
- dacp/logging_config.py +1 -3
- dacp/orchestrator.py +10 -28
- dacp/tools.py +1 -3
- dacp/workflow.py +30 -25
- dacp/workflow_runtime.py +148 -110
- {dacp-0.3.3.dist-info → dacp-0.3.4.dist-info}/METADATA +8 -3
- dacp-0.3.4.dist-info/RECORD +21 -0
- dacp-0.3.4.dist-info/entry_points.txt +2 -0
- dacp-0.3.3.dist-info/RECORD +0 -18
- {dacp-0.3.3.dist-info → dacp-0.3.4.dist-info}/WHEEL +0 -0
- {dacp-0.3.3.dist-info → dacp-0.3.4.dist-info}/licenses/LICENSE +0 -0
- {dacp-0.3.3.dist-info → dacp-0.3.4.dist-info}/top_level.txt +0 -0
dacp/intelligence.py
CHANGED
@@ -13,9 +13,7 @@ from typing import Dict, Any, Union
 logger = logging.getLogger("dacp.intelligence")
 
 
-def invoke_intelligence(
-    prompt: str, config: Dict[str, Any]
-) -> Union[str, Dict[str, Any]]:
+def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> Union[str, Dict[str, Any]]:
     """
     Invoke an intelligence provider with the given prompt and configuration.
 
@@ -54,8 +52,7 @@ def invoke_intelligence(
     else:
         available_engines = ["openai", "anthropic", "azure", "local"]
         raise ValueError(
-            f"Unsupported engine: {engine}. "
-            f"Available engines: {available_engines}"
+            f"Unsupported engine: {engine}. Available engines: {available_engines}"
         )
 
     duration = time.time() - start_time
@@ -66,10 +63,7 @@ def invoke_intelligence(
 
     except Exception as e:
         duration = time.time() - start_time
-        logger.error(
-            f"❌ Intelligence call failed after {duration:.3f}s: "
-            f"{type(e).__name__}: {e}"
-        )
+        logger.error(f"❌ Intelligence call failed after {duration:.3f}s: {type(e).__name__}: {e}")
         raise
 
 
@@ -95,8 +89,7 @@ def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
     if not api_key:
         logger.error("❌ OpenAI API key not found")
         raise ValueError(
-            "OpenAI API key not found in config or OPENAI_API_KEY "
-            "environment variable"
+            "OpenAI API key not found in config or OPENAI_API_KEY environment variable"
         )
 
     # Configure client
@@ -111,10 +104,7 @@ def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
     temperature = config.get("temperature", 0.7)
     max_tokens = config.get("max_tokens", 1000)
 
-    logger.debug(
-        f"🔧 OpenAI config: model={model}, temp={temperature}, "
-        f"max_tokens={max_tokens}"
-    )
+    logger.debug(f"🔧 OpenAI config: model={model}, temp={temperature}, max_tokens={max_tokens}")
 
     # Make API call
     response = client.chat.completions.create(
@@ -143,8 +133,7 @@ def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
     if not api_key:
         logger.error("❌ Anthropic API key not found")
         raise ValueError(
-            "Anthropic API key not found in config or ANTHROPIC_API_KEY "
-            "environment variable"
+            "Anthropic API key not found in config or ANTHROPIC_API_KEY environment variable"
        )
 
     # Configure client
@@ -159,10 +148,7 @@ def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
     temperature = config.get("temperature", 0.7)
     max_tokens = config.get("max_tokens", 1000)
 
-    logger.debug(
-        f"🔧 Anthropic config: model={model}, temp={temperature}, "
-        f"max_tokens={max_tokens}"
-    )
+    logger.debug(f"🔧 Anthropic config: model={model}, temp={temperature}, max_tokens={max_tokens}")
 
     # Make API call
     response = client.messages.create(
@@ -195,8 +181,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
     if not api_key:
         logger.error("❌ Azure OpenAI API key not found")
         raise ValueError(
-            "Azure OpenAI API key not found in config or "
-            "AZURE_OPENAI_API_KEY environment variable"
+            "Azure OpenAI API key not found in config or AZURE_OPENAI_API_KEY environment variable"
         )
 
     if not endpoint:
@@ -207,9 +192,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
         )
 
     # Configure Azure client
-    client = openai.AzureOpenAI(
-        api_key=api_key, azure_endpoint=endpoint, api_version=api_version
-    )
+    client = openai.AzureOpenAI(api_key=api_key, azure_endpoint=endpoint, api_version=api_version)
 
     # Prepare request
     model = config.get("model", config.get("deployment_name", "gpt-35-turbo"))
@@ -217,8 +200,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
     max_tokens = config.get("max_tokens", 1000)
 
     logger.debug(
-        f"🔧 Azure OpenAI config: model={model}, temp={temperature}, "
-        f"max_tokens={max_tokens}"
+        f"🔧 Azure OpenAI config: model={model}, temp={temperature}, max_tokens={max_tokens}"
     )
 
     # Make API call
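Context for the hunks above: `invoke_intelligence` dispatches on `config["engine"]` and reads `model`, `temperature`, and `max_tokens` via the `config.get` calls shown. A minimal usage sketch; the model name and any config keys beyond those visible in this diff are assumptions:

```python
from dacp.intelligence import invoke_intelligence

config = {
    "engine": "openai",   # one of the engines listed in the ValueError: openai, anthropic, azure, local
    "model": "gpt-4",     # hypothetical model name
    "temperature": 0.7,   # default per config.get("temperature", 0.7)
    "max_tokens": 1000,   # default per config.get("max_tokens", 1000)
}

# Returns the provider's reply (text or dict), per the Union[str, Dict[str, Any]] annotation.
reply = invoke_intelligence("Summarize the 0.3.4 changes.", config)
```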
dacp/json_parser.py
CHANGED
@@ -17,18 +17,18 @@ logger = logging.getLogger("dacp.json_parser")
 def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
     """
     Extract JSON from text using multiple strategies.
-
+
     Args:
         text: Raw text that might contain JSON
-
+
     Returns:
         Parsed JSON dict or None if no valid JSON found
     """
     if not isinstance(text, str):
         return None
-
+
     logger.debug(f"🔍 Attempting to extract JSON from text: {text[:100]}...")
-
+
     # Strategy 1: Try parsing the entire text as JSON
     try:
         result = json.loads(text.strip())
@@ -36,10 +36,10 @@ def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
         return result
     except json.JSONDecodeError:
         logger.debug("❌ Failed to parse entire text as JSON")
-
+
     # Strategy 2: Find JSON between braces
-    json_start = text.find('{')
-    json_end = text.rfind('}') + 1
+    json_start = text.find("{")
+    json_end = text.rfind("}") + 1
     if json_start >= 0 and json_end > json_start:
         json_str = text[json_start:json_end]
         try:
@@ -48,9 +48,9 @@ def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
             return result
         except json.JSONDecodeError:
             logger.debug("❌ Failed to parse JSON between braces")
-
+
     # Strategy 3: Find JSON in code blocks
-    code_block_pattern = r'```(?:json)?\s*(\{.*?\})\s*```'
+    code_block_pattern = r"```(?:json)?\s*(\{.*?\})\s*```"
     matches = re.findall(code_block_pattern, text, re.DOTALL)
     for match in matches:
         try:
@@ -59,7 +59,7 @@ def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
             return result
         except json.JSONDecodeError:
             continue
-
+
     # Strategy 4: Find JSON after common prefixes
     prefixes = [
         "json response:",
@@ -69,40 +69,40 @@ def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
         "here is the json:",
         "the json is:",
     ]
-
+
     for prefix in prefixes:
         prefix_pos = text.lower().find(prefix.lower())
         if prefix_pos >= 0:
-            remaining_text = text[prefix_pos + len(prefix):].strip()
+            remaining_text = text[prefix_pos + len(prefix) :].strip()
             extracted = extract_json_from_text(remaining_text)
             if extracted:
                 logger.debug(f"✅ Successfully extracted JSON after prefix: {prefix}")
                 return extracted
-
+
     logger.debug("❌ No valid JSON found in text")
     return None
 
 
 def create_fallback_response(
-    text: str,
+    text: str,
     required_fields: Dict[str, Any],
-    optional_fields: Dict[str, Any] = None
+    optional_fields: Optional[Dict[str, Any]] = None,
 ) -> Dict[str, Any]:
     """
     Create a fallback response when JSON parsing fails.
-
+
     Args:
         text: Original LLM response text
         required_fields: Dictionary of required field names and default values
         optional_fields: Dictionary of optional field names and default values
-
+
     Returns:
         Dictionary with required fields filled
     """
     logger.info(f"🔄 Creating fallback response for text: {text[:50]}...")
-
+
     fallback = {}
-
+
     # Fill required fields with defaults or extracted content
     for field_name, default_value in required_fields.items():
         if field_name in ["message", "response_message", "greeting_message"]:
@@ -111,7 +111,7 @@ def create_fallback_response(
             logger.debug(f"📝 Using text as {field_name}")
         elif field_name in ["agent", "sender_agent", "target_agent"]:
             # Try to extract agent names or use default
-            agent_match = re.search(r'agent[:\s]+([a-zA-Z0-9_-]+)', text, re.IGNORECASE)
+            agent_match = re.search(r"agent[:\s]+([a-zA-Z0-9_-]+)", text, re.IGNORECASE)
             if agent_match:
                 fallback[field_name] = agent_match.group(1)
                 logger.debug(f"🎯 Extracted agent name: {agent_match.group(1)}")
@@ -121,45 +121,47 @@ def create_fallback_response(
         else:
             fallback[field_name] = default_value
             logger.debug(f"⚙️ Setting {field_name} to default: {default_value}")
-
+
     # Fill optional fields if provided
     if optional_fields:
         for field_name, default_value in optional_fields.items():
             fallback[field_name] = default_value
             logger.debug(f"📋 Adding optional field {field_name}: {default_value}")
-
+
     logger.info(f"✅ Created fallback response with {len(fallback)} fields")
     return fallback
 
 
 def robust_json_parse(
-    response: Union[str, Dict[str, Any], BaseModel],
+    response: Union[str, Dict[str, Any], BaseModel],
     target_model: type,
     required_fields: Dict[str, Any],
-    optional_fields: Dict[str, Any] = None
-) -> Any:
+    optional_fields: Optional[Dict[str, Any]] = None,
+) -> Any:
     """
     Robust JSON parsing with intelligent fallbacks.
-
+
     Args:
         response: LLM response (string, dict, or Pydantic model)
         target_model: Pydantic model class to create
         required_fields: Required fields with default values
         optional_fields: Optional fields with default values
-
+
     Returns:
         Instance of target_model
-
+
     Raises:
         ValueError: If parsing fails completely
     """
-    logger.debug(
-        f"🔧 Parsing response of type {type(response).__name__} into {target_model.__name__}")
+    logger.debug(
+        f"🔧 Parsing response of type {type(response).__name__} into {target_model.__name__}"
+    )
+
     # If already the target model, return as-is
     if isinstance(response, target_model):
         logger.debug("✅ Response is already target model")
         return response
-
+
     # If dict, try to create model directly
     if isinstance(response, dict):
         try:
@@ -168,11 +170,11 @@ def robust_json_parse(
             return result
         except Exception as e:
             logger.debug(f"❌ Failed to create model from dict: {e}")
-
+
     # If string, try JSON extraction
     if isinstance(response, str):
         extracted_json = extract_json_from_text(response)
-
+
         if extracted_json:
             try:
                 result = target_model(**extracted_json)
@@ -180,15 +182,11 @@ def robust_json_parse(
                 return result
             except Exception as e:
                 logger.debug(f"❌ Failed to create model from extracted JSON: {e}")
-
+
         # Create fallback response
         logger.info("🔄 Creating fallback response for string input")
-        fallback_data = create_fallback_response(
-            response,
-            required_fields,
-            optional_fields
-        )
-
+        fallback_data = create_fallback_response(response, required_fields, optional_fields)
+
         try:
             result = target_model(**fallback_data)
             logger.info("✅ Successfully created model from fallback data")
@@ -196,37 +194,37 @@ def robust_json_parse(
         except Exception as e:
             logger.error(f"❌ Failed to create fallback response: {e}")
             raise ValueError(f"Failed to create fallback response: {e}")
-
+
     # Unexpected response type
     error_msg = f"Unable to parse response of type {type(response)}: {response}"
     logger.error(f"❌ {error_msg}")
     raise ValueError(error_msg)
 
 
-def parse_with_fallback(response: Any, model_class: type, **field_defaults) -> Any:
+def parse_with_fallback(response: Any, model_class: type, **field_defaults: Any) -> Any:
     """
     Convenience function for parsing with automatic field detection.
-
+
     Args:
         response: LLM response to parse
         model_class: Pydantic model class
         **field_defaults: Default values for fields (field_name=default_value)
-
+
     Returns:
         Instance of model_class
     """
     # Extract required fields from model
     required_fields = {}
     optional_fields = {}
-
+
     # Get field info from Pydantic model
-    if hasattr(model_class, 'model_fields'):
+    if hasattr(model_class, "model_fields"):
         for field_name, field_info in model_class.model_fields.items():
             default_value = field_defaults.get(field_name, "")
-
+
             if field_info.is_required():
                 required_fields[field_name] = default_value
             else:
                 optional_fields[field_name] = field_info.default
-
-    return robust_json_parse(response, model_class, required_fields, optional_fields)
+
+    return robust_json_parse(response, model_class, required_fields, optional_fields)
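The parsing pipeline above tries four extraction strategies in order, then falls back to routing the raw text into message-like required fields. A sketch of both paths, assuming a hypothetical Pydantic v2 model (the `model_fields`/`is_required()` introspection matches the code above):

```python
from pydantic import BaseModel

from dacp.json_parser import extract_json_from_text, parse_with_fallback


class GreetingResponse(BaseModel):  # hypothetical target model
    message: str


# Strategy 2 (brace matching) recovers JSON embedded in prose.
print(extract_json_from_text('The JSON is: {"message": "hello"}'))
# -> {'message': 'hello'}

# With no JSON at all, create_fallback_response() places the raw text
# into "message"-like required fields, so parsing still succeeds.
result = parse_with_fallback("just plain prose", GreetingResponse)
print(result.message)  # -> just plain prose
```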
dacp/logging_config.py
CHANGED
@@ -32,9 +32,7 @@ def setup_dacp_logging(
         log_format = "%(name)s - %(levelname)s - %(message)s"
     elif format_style == "detailed":
         if include_timestamp:
-            log_format = (
-                "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
-            )
+            log_format = "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
         else:
             log_format = "%(name)s:%(lineno)d - %(levelname)s - %(message)s"
     elif format_style == "emoji":
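A sketch of the effect, assuming `format_style` and `include_timestamp` are keyword parameters of `setup_dacp_logging` (only their use inside the function body is visible in this hunk):

```python
from dacp.logging_config import setup_dacp_logging

# "detailed" with timestamps selects the single-line format joined above:
# "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
setup_dacp_logging(format_style="detailed", include_timestamp=True)
```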
dacp/orchestrator.py
CHANGED
@@ -62,10 +62,7 @@ class Orchestrator:
             raise ValueError("Agent must inherit from dacp.Agent base class")
 
         self.agents[name] = agent
-        logger.info(
-            f"✅ Agent '{name}' registered successfully "
-            f"(type: {type(agent).__name__})"
-        )
+        logger.info(f"✅ Agent '{name}' registered successfully (type: {type(agent).__name__})")
         logger.debug(f"📊 Total registered agents: {len(self.agents)}")
 
     def unregister_agent(self, name: str) -> bool:
@@ -121,12 +118,12 @@ class Orchestrator:
             response = agent.handle_message(message)
 
             # Handle Pydantic models by converting to dict
-            if hasattr(response, 'model_dump'):
+            if hasattr(response, "model_dump"):
                 logger.debug(f"📊 Converting Pydantic model to dict: {type(response).__name__}")
                 response = response.model_dump()
             elif not isinstance(response, dict):
                 logger.debug(f"📊 Converting response to dict: {type(response)}")
-                if hasattr(response, '__dict__'):
+                if hasattr(response, "__dict__"):
                     response = response.__dict__
                 else:
                     response = {"result": str(response)}
@@ -138,9 +135,7 @@ class Orchestrator:
             # Check if agent requested tool execution
             if isinstance(response, dict) and "tool_request" in response:
                 logger.info(f"🔧 Agent '{agent_name}' requested tool execution")
-                response = self._handle_tool_request(
-                    agent_name, response["tool_request"]
-                )
+                response = self._handle_tool_request(agent_name, response["tool_request"])
 
             # Log the conversation
             self._log_conversation(agent_name, message, response, duration)
@@ -179,16 +174,11 @@ class Orchestrator:
             responses[agent_name] = self.send_message(agent_name, message)
 
         duration = time.time() - start_time
-        logger.info(
-            f"✅ Broadcast completed in {duration:.3f}s "
-            f"({len(responses)} responses)"
-        )
+        logger.info(f"✅ Broadcast completed in {duration:.3f}s ({len(responses)} responses)")
 
         return responses
 
-    def _handle_tool_request(
-        self, agent_name: str, tool_request: Dict[str, Any]
-    ) -> Dict[str, Any]:
+    def _handle_tool_request(self, agent_name: str, tool_request: Dict[str, Any]) -> Dict[str, Any]:
         """
         Handle tool execution request from an agent.
 
@@ -213,9 +203,7 @@ class Orchestrator:
             result = execute_tool(tool_name, tool_args)
             duration = time.time() - start_time
 
-            logger.info(
-                f"✅ Tool '{tool_name}' executed successfully in {duration:.3f}s"
-            )
+            logger.info(f"✅ Tool '{tool_name}' executed successfully in {duration:.3f}s")
             logger.debug(f"🔧 Tool result: {result}")
 
             return {"tool_result": {"name": tool_name, "result": result}}
@@ -254,9 +242,7 @@ class Orchestrator:
             self.conversation_history = self.conversation_history[-1000:]
             logger.debug("🗂️ Conversation history trimmed to 1000 entries")
 
-    def get_conversation_history(
-        self, limit: Optional[int] = None
-    ) -> List[Dict[str, Any]]:
+    def get_conversation_history(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:
         """
         Get conversation history.
 
@@ -284,13 +270,9 @@ class Orchestrator:
             "agent_names": list(self.agents.keys()),
             "conversation_entries": len(self.conversation_history),
             "start_time": (
-                self.conversation_history[0]["timestamp"]
-                if self.conversation_history
-                else None
+                self.conversation_history[0]["timestamp"] if self.conversation_history else None
             ),
             "last_activity": (
-                self.conversation_history[-1]["timestamp"]
-                if self.conversation_history
-                else None
+                self.conversation_history[-1]["timestamp"] if self.conversation_history else None
             ),
         }
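The tool round trip reformatted above uses a fixed envelope: when an agent's `handle_message()` return value contains a `tool_request` key, `send_message` routes it through `_handle_tool_request`, and the confirmed reply shape is `{"tool_result": {"name": ..., "result": ...}}`. A sketch; the `name`/`args` keys inside the request are inferred from the `tool_name`/`tool_args` variables (not shown in full here), and the tool itself is hypothetical:

```python
# What an agent returns to ask the orchestrator to run a tool:
agent_response = {
    "tool_request": {
        "name": "file_writer",                         # hypothetical registered tool
        "args": {"path": "out.txt", "content": "hi"},  # keys inferred, not confirmed by this diff
    }
}

# What send_message() hands back after execute_tool(tool_name, tool_args):
# {"tool_result": {"name": "file_writer", "result": {...}}}
```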
dacp/tools.py
CHANGED
@@ -43,9 +43,7 @@ def execute_tool(name: str, args: Dict[str, Any]) -> Dict[str, Any]:
     """
     if name not in TOOL_REGISTRY:
         available_tools = list(TOOL_REGISTRY.keys())
-        logger.error(
-            f"❌ Tool '{name}' not found. " f"Available tools: {available_tools}"
-        )
+        logger.error(f"❌ Tool '{name}' not found. Available tools: {available_tools}")
         raise ValueError(f"Tool '{name}' not found. Available tools: {available_tools}")
 
     logger.debug(f"🛠️ Executing tool '{name}' with args: {args}")
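For reference, the error path joined above is easy to exercise; `execute_tool` and `TOOL_REGISTRY` are the names used in this file:

```python
from dacp.tools import execute_tool

# Unknown tool names raise immediately, listing the registry's keys:
try:
    execute_tool("missing_tool", {})
except ValueError as err:
    print(err)  # Tool 'missing_tool' not found. Available tools: [...]
```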
dacp/workflow.py
CHANGED
@@ -17,6 +17,7 @@ logger = logging.getLogger("dacp.workflow")
 
 class TaskStatus(Enum):
     """Task status enumeration."""
+
     PENDING = "pending"
     ASSIGNED = "assigned"
     IN_PROGRESS = "in_progress"
@@ -27,6 +28,7 @@ class TaskStatus(Enum):
 
 class TaskPriority(Enum):
     """Task priority enumeration."""
+
     LOW = 1
     NORMAL = 2
     HIGH = 3
@@ -36,6 +38,7 @@ class TaskPriority(Enum):
 @dataclass
 class Task:
     """Represents a task in the workflow system."""
+
     id: str
     type: str
     data: Dict[str, Any]
@@ -74,6 +77,7 @@ class Task:
 @dataclass
 class WorkflowRule:
     """Defines routing rules for agent-to-agent communication."""
+
     source_task_type: str
     target_agent: str
     target_task_type: str
@@ -85,7 +89,7 @@ class WorkflowRule:
 class TaskBoard:
     """Central task board for managing agent-to-agent tasks."""
 
-    def __init__(self):
+    def __init__(self) -> None:
         self.tasks: Dict[str, Task] = {}
         self.agent_queues: Dict[str, List[str]] = {}
         self.completed_tasks: List[str] = []
@@ -98,11 +102,11 @@ class TaskBoard:
         source_agent: str,
         target_agent: Optional[str] = None,
         priority: TaskPriority = TaskPriority.NORMAL,
-        dependencies: List[str] = None,
+        dependencies: Optional[List[str]] = None,
     ) -> str:
         """Add a new task to the board."""
         task_id = str(uuid.uuid4())
-
+
         task = Task(
             id=task_id,
             type=task_type,
@@ -145,7 +149,7 @@ class TaskBoard:
 
         # Sort by priority (higher first) then by creation time (older first)
         available_tasks.sort(key=lambda t: (-t.priority.value, t.created_at))
-
+
         next_task = available_tasks[0]
         next_task.status = TaskStatus.IN_PROGRESS
 
@@ -153,10 +157,7 @@ class TaskBoard:
         return next_task
 
     def complete_task(
-        self,
-        task_id: str,
-        result: Dict[str, Any],
-        trigger_rules: bool = True
+        self, task_id: str, result: Dict[str, Any], trigger_rules: bool = True
     ) -> None:
         """Mark a task as completed and trigger workflow rules."""
         if task_id not in self.tasks:
@@ -240,9 +241,7 @@ class TaskBoard:
                 priority=rule.priority,
             )
 
-            logger.info(
-                f"🔄 Workflow rule triggered: {completed_task.id} → {new_task_id}"
-            )
+            logger.info(f"🔄 Workflow rule triggered: {completed_task.id} → {new_task_id}")
 
     def get_task_status(self, task_id: str) -> Optional[Dict[str, Any]]:
         """Get task status and details."""
@@ -261,13 +260,15 @@ class TaskBoard:
         for task_id in queue:
             if task_id in self.tasks:
                 task = self.tasks[task_id]
-                task_details.append({
-                    "id": task_id,
-                    "type": task.type,
-                    "status": task.status.value,
-                    "priority": task.priority.value,
-                    "created_at": task.created_at,
-                })
+                task_details.append(
+                    {
+                        "id": task_id,
+                        "type": task.type,
+                        "status": task.status.value,
+                        "priority": task.priority.value,
+                        "created_at": task.created_at,
+                    }
+                )
 
         return {
             "agent": agent_name,
@@ -277,7 +278,7 @@ class TaskBoard:
 
     def get_workflow_summary(self) -> Dict[str, Any]:
         """Get overall workflow summary."""
-        status_counts = {}
+        status_counts: Dict[str, int] = {}
         for task in self.tasks.values():
             status = task.status.value
             status_counts[status] = status_counts.get(status, 0) + 1
@@ -285,9 +286,7 @@ class TaskBoard:
         return {
             "total_tasks": len(self.tasks),
             "status_counts": status_counts,
-            "agent_queues": {
-                agent: len(queue) for agent, queue in self.agent_queues.items()
-            },
+            "agent_queues": {agent: len(queue) for agent, queue in self.agent_queues.items()},
             "completed_tasks": len(self.completed_tasks),
             "workflow_rules": len(self.workflow_rules),
         }
@@ -296,7 +295,7 @@ class TaskBoard:
 class WorkflowOrchestrator:
     """Enhanced orchestrator with workflow and agent-to-agent communication."""
 
-    def __init__(self, orchestrator):
+    def __init__(self, orchestrator: Any) -> None:
         """Initialize with a base orchestrator."""
         self.orchestrator = orchestrator
         self.task_board = TaskBoard()
@@ -358,7 +357,13 @@ class WorkflowOrchestrator:
 
             if "error" in response:
                 self.task_board.fail_task(task.id, response["error"])
-                results.append({"task_id": task.id, "status": "failed", "error": response["error"]})
+                results.append(
+                    {
+                        "task_id": task.id,
+                        "status": "failed",
+                        "error": response["error"],
+                    }
+                )
             else:
                 self.task_board.complete_task(task.id, response)
                 results.append({"task_id": task.id, "status": "completed", "result": response})
@@ -406,4 +411,4 @@ class WorkflowOrchestrator:
                 agent: self.task_board.get_agent_queue_status(agent)
                 for agent in self.orchestrator.agents.keys()
             },
-        }
+        }
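Taken together, the `TaskBoard` API touched in this diff supports a flow like the sketch below. The hunk starts mid-signature for `add_task`, so the leading `task_type`/`data` positionals are assumptions; `priority`, `dependencies`, `complete_task`, and `get_workflow_summary` appear as shown above, and the agent names are hypothetical:

```python
from dacp.workflow import TaskBoard, TaskPriority

board = TaskBoard()

task_id = board.add_task(
    "summarize",                   # assumed leading task_type parameter
    {"text": "release notes"},     # assumed data payload parameter
    source_agent="writer_agent",   # hypothetical agent names
    target_agent="reviewer_agent",
    priority=TaskPriority.HIGH,
)

# Completing a task may enqueue follow-up tasks via WorkflowRule routing.
board.complete_task(task_id, {"summary": "ok"})
print(board.get_workflow_summary())  # total_tasks, status_counts, agent_queues, ...
```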