dacp 0.3.2__py3-none-any.whl → 0.3.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
dacp/intelligence.py CHANGED
@@ -13,9 +13,7 @@ from typing import Dict, Any, Union
13
13
  logger = logging.getLogger("dacp.intelligence")
14
14
 
15
15
 
16
- def invoke_intelligence(
17
- prompt: str, config: Dict[str, Any]
18
- ) -> Union[str, Dict[str, Any]]:
16
+ def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> Union[str, Dict[str, Any]]:
19
17
  """
20
18
  Invoke an intelligence provider with the given prompt and configuration.
21
19
 
@@ -54,8 +52,7 @@ def invoke_intelligence(
54
52
  else:
55
53
  available_engines = ["openai", "anthropic", "azure", "local"]
56
54
  raise ValueError(
57
- f"Unsupported engine: {engine}. "
58
- f"Available engines: {available_engines}"
55
+ f"Unsupported engine: {engine}. Available engines: {available_engines}"
59
56
  )
60
57
 
61
58
  duration = time.time() - start_time
@@ -66,10 +63,7 @@ def invoke_intelligence(
66
63
 
67
64
  except Exception as e:
68
65
  duration = time.time() - start_time
69
- logger.error(
70
- f"❌ Intelligence call failed after {duration:.3f}s: "
71
- f"{type(e).__name__}: {e}"
72
- )
66
+ logger.error(f"❌ Intelligence call failed after {duration:.3f}s: {type(e).__name__}: {e}")
73
67
  raise
74
68
 
75
69
 
@@ -95,8 +89,7 @@ def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
95
89
  if not api_key:
96
90
  logger.error("❌ OpenAI API key not found")
97
91
  raise ValueError(
98
- "OpenAI API key not found in config or OPENAI_API_KEY "
99
- "environment variable"
92
+ "OpenAI API key not found in config or OPENAI_API_KEY environment variable"
100
93
  )
101
94
 
102
95
  # Configure client
@@ -111,10 +104,7 @@ def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
111
104
  temperature = config.get("temperature", 0.7)
112
105
  max_tokens = config.get("max_tokens", 1000)
113
106
 
114
- logger.debug(
115
- f"🔧 OpenAI config: model={model}, temp={temperature}, "
116
- f"max_tokens={max_tokens}"
117
- )
107
+ logger.debug(f"🔧 OpenAI config: model={model}, temp={temperature}, max_tokens={max_tokens}")
118
108
 
119
109
  # Make API call
120
110
  response = client.chat.completions.create(
@@ -143,8 +133,7 @@ def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
143
133
  if not api_key:
144
134
  logger.error("❌ Anthropic API key not found")
145
135
  raise ValueError(
146
- "Anthropic API key not found in config or ANTHROPIC_API_KEY "
147
- "environment variable"
136
+ "Anthropic API key not found in config or ANTHROPIC_API_KEY environment variable"
148
137
  )
149
138
 
150
139
  # Configure client
@@ -159,10 +148,7 @@ def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
159
148
  temperature = config.get("temperature", 0.7)
160
149
  max_tokens = config.get("max_tokens", 1000)
161
150
 
162
- logger.debug(
163
- f"🔧 Anthropic config: model={model}, temp={temperature}, "
164
- f"max_tokens={max_tokens}"
165
- )
151
+ logger.debug(f"🔧 Anthropic config: model={model}, temp={temperature}, max_tokens={max_tokens}")
166
152
 
167
153
  # Make API call
168
154
  response = client.messages.create(
@@ -195,8 +181,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
195
181
  if not api_key:
196
182
  logger.error("❌ Azure OpenAI API key not found")
197
183
  raise ValueError(
198
- "Azure OpenAI API key not found in config or "
199
- "AZURE_OPENAI_API_KEY environment variable"
184
+ "Azure OpenAI API key not found in config or AZURE_OPENAI_API_KEY environment variable"
200
185
  )
201
186
 
202
187
  if not endpoint:
@@ -207,9 +192,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
207
192
  )
208
193
 
209
194
  # Configure Azure client
210
- client = openai.AzureOpenAI(
211
- api_key=api_key, azure_endpoint=endpoint, api_version=api_version
212
- )
195
+ client = openai.AzureOpenAI(api_key=api_key, azure_endpoint=endpoint, api_version=api_version)
213
196
 
214
197
  # Prepare request
215
198
  model = config.get("model", config.get("deployment_name", "gpt-35-turbo"))
@@ -217,8 +200,7 @@ def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
217
200
  max_tokens = config.get("max_tokens", 1000)
218
201
 
219
202
  logger.debug(
220
- f"🔧 Azure OpenAI config: model={model}, temp={temperature}, "
221
- f"max_tokens={max_tokens}"
203
+ f"🔧 Azure OpenAI config: model={model}, temp={temperature}, max_tokens={max_tokens}"
222
204
  )
223
205
 
224
206
  # Make API call
dacp/json_parser.py ADDED
@@ -0,0 +1,230 @@
1
+ """
2
+ DACP JSON Parser - Robust JSON parsing for agent responses.
3
+
4
+ This module provides enhanced JSON parsing capabilities that can handle
5
+ various LLM response formats and provide intelligent fallbacks.
6
+ """
7
+
8
+ import json
9
+ import re
10
+ import logging
11
+ from typing import Dict, Any, Optional, Union
12
+ from pydantic import BaseModel
13
+
14
+ logger = logging.getLogger("dacp.json_parser")
15
+
16
+
17
def extract_json_from_text(text: str) -> Optional[Dict[str, Any]]:
    """
    Extract a JSON object from text using multiple strategies.

    Strategies, in order:
      1. Parse the entire (stripped) text as JSON.
      2. Parse the substring between the first '{' and the last '}'.
      3. Parse JSON found inside ``` / ```json fenced code blocks.
      4. Recurse on the text following common prefixes like "response:".

    Args:
        text: Raw text that might contain JSON.

    Returns:
        Parsed JSON dict, or None if no valid JSON object is found.
        Only dict results are returned (matching the declared return
        type) — a bare JSON list/number/string counts as "no JSON
        object found".
    """
    if not isinstance(text, str):
        return None

    logger.debug(f"🔍 Attempting to extract JSON from text: {text[:100]}...")

    # Strategy 1: Try parsing the entire text as JSON
    try:
        result = json.loads(text.strip())
        # Guard: json.loads may yield a list/number/string; the contract
        # promises a dict, so non-objects fall through to later strategies.
        if isinstance(result, dict):
            logger.debug("✅ Successfully parsed entire text as JSON")
            return result
    except json.JSONDecodeError:
        logger.debug("❌ Failed to parse entire text as JSON")

    # Strategy 2: Find JSON between the first '{' and the last '}'
    json_start = text.find("{")
    json_end = text.rfind("}") + 1
    if json_start >= 0 and json_end > json_start:
        json_str = text[json_start:json_end]
        try:
            result = json.loads(json_str)
            if isinstance(result, dict):
                logger.debug("✅ Successfully extracted JSON between braces")
                return result
        except json.JSONDecodeError:
            logger.debug("❌ Failed to parse JSON between braces")

    # Strategy 3: Find JSON in fenced code blocks
    code_block_pattern = r"```(?:json)?\s*(\{.*?\})\s*```"
    matches = re.findall(code_block_pattern, text, re.DOTALL)
    for match in matches:
        try:
            result = json.loads(match)
            if isinstance(result, dict):
                logger.debug("✅ Successfully extracted JSON from code block")
                return result
        except json.JSONDecodeError:
            continue

    # Strategy 4: Find JSON after common prefixes
    prefixes = [
        "json response:",
        "response:",
        "output:",
        "result:",
        "here is the json:",
        "the json is:",
    ]

    for prefix in prefixes:
        prefix_pos = text.lower().find(prefix.lower())
        if prefix_pos >= 0:
            remaining_text = text[prefix_pos + len(prefix):].strip()
            extracted = extract_json_from_text(remaining_text)
            # `is not None` rather than truthiness: a valid empty JSON
            # object ({}) is falsy but is still a successful extraction.
            if extracted is not None:
                logger.debug(f"✅ Successfully extracted JSON after prefix: {prefix}")
                return extracted

    logger.debug("❌ No valid JSON found in text")
    return None
84
+
85
+
86
def create_fallback_response(
    text: str,
    required_fields: Dict[str, Any],
    optional_fields: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Build a best-effort response dict when JSON parsing has failed.

    Required fields are populated heuristically: message-like fields
    receive the raw (stripped) text, agent-like fields are scraped from
    the text (falling back to the supplied default or "unknown"), and
    every other field simply gets its supplied default. Optional fields
    are copied in verbatim.

    Args:
        text: Original LLM response text.
        required_fields: Mapping of required field names to default values.
        optional_fields: Mapping of optional field names to default values.

    Returns:
        Dictionary with all required (and optional) fields filled.
    """
    logger.info(f"🔄 Creating fallback response for text: {text[:50]}...")

    message_like = {"message", "response_message", "greeting_message"}
    agent_like = {"agent", "sender_agent", "target_agent"}

    fallback: Dict[str, Any] = {}

    for name, default in required_fields.items():
        if name in message_like:
            # The raw text is the best stand-in for a message field.
            fallback[name] = text.strip()
            logger.debug(f"📝 Using text as {name}")
        elif name in agent_like:
            # Scrape an agent identifier out of the free-form text.
            found = re.search(r"agent[:\s]+([a-zA-Z0-9_-]+)", text, re.IGNORECASE)
            if found:
                fallback[name] = found.group(1)
                logger.debug(f"🎯 Extracted agent name: {found.group(1)}")
            else:
                fallback[name] = default or "unknown"
                logger.debug(f"🔧 Using default for {name}: {fallback[name]}")
        else:
            fallback[name] = default
            logger.debug(f"⚙️ Setting {name} to default: {default}")

    for name, default in (optional_fields or {}).items():
        fallback[name] = default
        logger.debug(f"📋 Adding optional field {name}: {default}")

    logger.info(f"✅ Created fallback response with {len(fallback)} fields")
    return fallback
133
+
134
+
135
def robust_json_parse(
    response: Union[str, Dict[str, Any], BaseModel],
    target_model: type,
    required_fields: Dict[str, Any],
    optional_fields: Optional[Dict[str, Any]] = None,
) -> Any:
    """
    Robust JSON parsing with intelligent fallbacks.

    Accepts the response in three shapes: an instance of ``target_model``
    (returned as-is), a dict (passed to the model constructor), or a
    string (JSON is extracted from it, and if that fails a fallback
    response is synthesized from ``required_fields``/``optional_fields``).

    Args:
        response: LLM response (string, dict, or Pydantic model)
        target_model: Pydantic model class to create
        required_fields: Required fields with default values
        optional_fields: Optional fields with default values

    Returns:
        Instance of target_model

    Raises:
        ValueError: If parsing fails completely
    """
    logger.debug(
        f"🔧 Parsing response of type {type(response).__name__} into {target_model.__name__}"
    )

    # If already the target model, return as-is
    if isinstance(response, target_model):
        logger.debug("✅ Response is already target model")
        return response

    # If dict, try to create model directly
    if isinstance(response, dict):
        try:
            result = target_model(**response)
            logger.debug("✅ Successfully created model from dict")
            return result
        except Exception as e:
            # Fall through to the final error — dicts get no text fallback.
            logger.debug(f"❌ Failed to create model from dict: {e}")

    # If string, try JSON extraction
    if isinstance(response, str):
        extracted_json = extract_json_from_text(response)

        # `is not None` rather than truthiness: an extracted empty JSON
        # object ({}) is falsy but is still valid input for a model whose
        # fields all have defaults.
        if extracted_json is not None:
            try:
                result = target_model(**extracted_json)
                logger.debug("✅ Successfully created model from extracted JSON")
                return result
            except Exception as e:
                logger.debug(f"❌ Failed to create model from extracted JSON: {e}")

        # Create fallback response
        logger.info("🔄 Creating fallback response for string input")
        fallback_data = create_fallback_response(response, required_fields, optional_fields)

        try:
            result = target_model(**fallback_data)
            logger.info("✅ Successfully created model from fallback data")
            return result
        except Exception as e:
            logger.error(f"❌ Failed to create fallback response: {e}")
            # Chain the original exception so callers can see the root cause.
            raise ValueError(f"Failed to create fallback response: {e}") from e

    # Unexpected response type (or a dict that failed model construction)
    error_msg = f"Unable to parse response of type {type(response)}: {response}"
    logger.error(f"❌ {error_msg}")
    raise ValueError(error_msg)
202
+
203
+
204
def parse_with_fallback(response: Any, model_class: type, **field_defaults: Any) -> Any:
    """
    Convenience wrapper around robust_json_parse with automatic field detection.

    Inspects the model's declared fields via ``model_fields`` (Pydantic v2)
    and splits them into required fields (defaulted from ``field_defaults``,
    or "" when absent) and optional fields (defaulted from the model's own
    declared default).

    Args:
        response: LLM response to parse.
        model_class: Pydantic model class.
        **field_defaults: Default values for fields (field_name=default_value).

    Returns:
        Instance of model_class.
    """
    required: Dict[str, Any] = {}
    optional: Dict[str, Any] = {}

    # ``model_fields`` only exists on Pydantic v2 models; when absent, both
    # mappings stay empty and robust_json_parse proceeds without field info.
    for name, info in getattr(model_class, "model_fields", {}).items():
        if info.is_required():
            required[name] = field_defaults.get(name, "")
        else:
            optional[name] = info.default

    return robust_json_parse(response, model_class, required, optional)
dacp/logging_config.py CHANGED
@@ -32,9 +32,7 @@ def setup_dacp_logging(
32
32
  log_format = "%(name)s - %(levelname)s - %(message)s"
33
33
  elif format_style == "detailed":
34
34
  if include_timestamp:
35
- log_format = (
36
- "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
37
- )
35
+ log_format = "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
38
36
  else:
39
37
  log_format = "%(name)s:%(lineno)d - %(levelname)s - %(message)s"
40
38
  elif format_style == "emoji":
dacp/orchestrator.py CHANGED
@@ -62,10 +62,7 @@ class Orchestrator:
62
62
  raise ValueError("Agent must inherit from dacp.Agent base class")
63
63
 
64
64
  self.agents[name] = agent
65
- logger.info(
66
- f"✅ Agent '{name}' registered successfully "
67
- f"(type: {type(agent).__name__})"
68
- )
65
+ logger.info(f"✅ Agent '{name}' registered successfully (type: {type(agent).__name__})")
69
66
  logger.debug(f"📊 Total registered agents: {len(self.agents)}")
70
67
 
71
68
  def unregister_agent(self, name: str) -> bool:
@@ -120,6 +117,17 @@ class Orchestrator:
120
117
  # Call the agent's message handler
121
118
  response = agent.handle_message(message)
122
119
 
120
+ # Handle Pydantic models by converting to dict
121
+ if hasattr(response, "model_dump"):
122
+ logger.debug(f"📊 Converting Pydantic model to dict: {type(response).__name__}")
123
+ response = response.model_dump()
124
+ elif not isinstance(response, dict):
125
+ logger.debug(f"📊 Converting response to dict: {type(response)}")
126
+ if hasattr(response, "__dict__"):
127
+ response = response.__dict__
128
+ else:
129
+ response = {"result": str(response)}
130
+
123
131
  duration = time.time() - start_time
124
132
  logger.info(f"✅ Agent '{agent_name}' responded in {duration:.3f}s")
125
133
  logger.debug(f"📤 Agent response: {response}")
@@ -127,9 +135,7 @@ class Orchestrator:
127
135
  # Check if agent requested tool execution
128
136
  if isinstance(response, dict) and "tool_request" in response:
129
137
  logger.info(f"🔧 Agent '{agent_name}' requested tool execution")
130
- response = self._handle_tool_request(
131
- agent_name, response["tool_request"]
132
- )
138
+ response = self._handle_tool_request(agent_name, response["tool_request"])
133
139
 
134
140
  # Log the conversation
135
141
  self._log_conversation(agent_name, message, response, duration)
@@ -168,16 +174,11 @@ class Orchestrator:
168
174
  responses[agent_name] = self.send_message(agent_name, message)
169
175
 
170
176
  duration = time.time() - start_time
171
- logger.info(
172
- f"✅ Broadcast completed in {duration:.3f}s "
173
- f"({len(responses)} responses)"
174
- )
177
+ logger.info(f"✅ Broadcast completed in {duration:.3f}s ({len(responses)} responses)")
175
178
 
176
179
  return responses
177
180
 
178
- def _handle_tool_request(
179
- self, agent_name: str, tool_request: Dict[str, Any]
180
- ) -> Dict[str, Any]:
181
+ def _handle_tool_request(self, agent_name: str, tool_request: Dict[str, Any]) -> Dict[str, Any]:
181
182
  """
182
183
  Handle tool execution request from an agent.
183
184
 
@@ -202,9 +203,7 @@ class Orchestrator:
202
203
  result = execute_tool(tool_name, tool_args)
203
204
  duration = time.time() - start_time
204
205
 
205
- logger.info(
206
- f"✅ Tool '{tool_name}' executed successfully in {duration:.3f}s"
207
- )
206
+ logger.info(f"✅ Tool '{tool_name}' executed successfully in {duration:.3f}s")
208
207
  logger.debug(f"🔧 Tool result: {result}")
209
208
 
210
209
  return {"tool_result": {"name": tool_name, "result": result}}
@@ -229,6 +228,7 @@ class Orchestrator:
229
228
  entry = {
230
229
  "timestamp": time.time(),
231
230
  "session_id": self.session_id,
231
+ "agent": agent_name,
232
232
  "agent_name": agent_name,
233
233
  "message": message,
234
234
  "response": response,
@@ -242,9 +242,7 @@ class Orchestrator:
242
242
  self.conversation_history = self.conversation_history[-1000:]
243
243
  logger.debug("🗂️ Conversation history trimmed to 1000 entries")
244
244
 
245
- def get_conversation_history(
246
- self, limit: Optional[int] = None
247
- ) -> List[Dict[str, Any]]:
245
+ def get_conversation_history(self, limit: Optional[int] = None) -> List[Dict[str, Any]]:
248
246
  """
249
247
  Get conversation history.
250
248
 
@@ -272,13 +270,9 @@ class Orchestrator:
272
270
  "agent_names": list(self.agents.keys()),
273
271
  "conversation_entries": len(self.conversation_history),
274
272
  "start_time": (
275
- self.conversation_history[0]["timestamp"]
276
- if self.conversation_history
277
- else None
273
+ self.conversation_history[0]["timestamp"] if self.conversation_history else None
278
274
  ),
279
275
  "last_activity": (
280
- self.conversation_history[-1]["timestamp"]
281
- if self.conversation_history
282
- else None
276
+ self.conversation_history[-1]["timestamp"] if self.conversation_history else None
283
277
  ),
284
278
  }
dacp/tools.py CHANGED
@@ -43,9 +43,7 @@ def execute_tool(name: str, args: Dict[str, Any]) -> Dict[str, Any]:
43
43
  """
44
44
  if name not in TOOL_REGISTRY:
45
45
  available_tools = list(TOOL_REGISTRY.keys())
46
- logger.error(
47
- f"❌ Tool '{name}' not found. " f"Available tools: {available_tools}"
48
- )
46
+ logger.error(f"❌ Tool '{name}' not found. Available tools: {available_tools}")
49
47
  raise ValueError(f"Tool '{name}' not found. Available tools: {available_tools}")
50
48
 
51
49
  logger.debug(f"🛠️ Executing tool '{name}' with args: {args}")