dacp 0.1.0__py3-none-any.whl → 0.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dacp/__init__.py +18 -11
- dacp/intelligence.py +272 -0
- dacp/llm.py +28 -15
- dacp/main.py +15 -0
- dacp/orchestrator.py +260 -0
- dacp/tools.py +36 -11
- dacp-0.3.0.dist-info/METADATA +369 -0
- dacp-0.3.0.dist-info/RECORD +14 -0
- dacp-0.1.0.dist-info/METADATA +0 -114
- dacp-0.1.0.dist-info/RECORD +0 -11
- {dacp-0.1.0.dist-info → dacp-0.3.0.dist-info}/WHEEL +0 -0
- {dacp-0.1.0.dist-info → dacp-0.3.0.dist-info}/licenses/LICENSE +0 -0
- {dacp-0.1.0.dist-info → dacp-0.3.0.dist-info}/top_level.txt +0 -0
dacp/__init__.py
CHANGED
@@ -1,12 +1,8 @@
 """
-DACP
-
-A Python library for managing LLM/agent communications and tool function calls
-following the OAS Open Agent Specification.
+DACP (Declarative Agent Communication Protocol)
+A protocol for managing LLM/agent communications and tool function calls.
 """

-from .tools import register_tool, run_tool, TOOL_REGISTRY
-from .llm import call_llm
 from .protocol import (
     parse_agent_response,
     is_tool_request,
@@ -15,17 +11,28 @@ from .protocol import (
     is_final_response,
     get_final_response,
 )
+from .tools import (
+    register_tool, run_tool, TOOL_REGISTRY, file_writer
+)
+from .llm import call_llm
+from .intelligence import invoke_intelligence
+from .orchestrator import Orchestrator, Agent
+
+__version__ = "0.3.0"

-__version__ = "0.1.0"
 __all__ = [
-    "register_tool",
-    "run_tool",
-    "TOOL_REGISTRY",
-    "call_llm",
     "parse_agent_response",
     "is_tool_request",
     "get_tool_request",
     "wrap_tool_result",
     "is_final_response",
     "get_final_response",
+    "register_tool",
+    "run_tool",
+    "TOOL_REGISTRY",
+    "file_writer",
+    "call_llm",
+    "invoke_intelligence",
+    "Orchestrator",
+    "Agent",
 ]
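With the 0.3.0 `__init__`, every new symbol is importable straight from the package root. A minimal sketch (not part of the diff) exercising the new surface:

```python
# Sketch: the 0.3.0 root exports, as declared in __all__ above.
import dacp

print(dacp.__version__)                      # "0.3.0"
print("file_writer" in dacp.TOOL_REGISTRY)   # True: built-in tool registers on import
orchestrator = dacp.Orchestrator()           # orchestration now ships with the package
```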
dacp/intelligence.py
ADDED
@@ -0,0 +1,272 @@
+"""
+DACP Intelligence Module - Generic LLM provider interface.
+"""
+
+import os
+import logging
+from typing import Dict, Any, Optional
+
+log = logging.getLogger(__name__)
+
+
+class IntelligenceError(Exception):
+    """Base exception for intelligence provider errors."""
+    pass
+
+
+class UnsupportedProviderError(IntelligenceError):
+    """Raised when an unsupported intelligence provider is requested."""
+    pass
+
+
+class ConfigurationError(IntelligenceError):
+    """Raised when intelligence configuration is invalid."""
+    pass
+
+
+def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> str:
+    """
+    Invoke an intelligence provider (LLM) with the given prompt and configuration.
+
+    Args:
+        prompt: The input prompt to send to the intelligence provider
+        config: Configuration dictionary containing provider details
+
+    Returns:
+        Response string from the intelligence provider
+
+    Raises:
+        UnsupportedProviderError: If the provider is not supported
+        ConfigurationError: If the configuration is invalid
+        IntelligenceError: For other provider-specific errors
+    """
+    engine = config.get("engine")
+    if not engine:
+        raise ConfigurationError("Missing 'engine' in intelligence configuration")
+
+    engine = engine.lower()
+
+    if engine == "openai":
+        return _invoke_openai(prompt, config)
+    elif engine == "anthropic":
+        return _invoke_anthropic(prompt, config)
+    elif engine == "azure":
+        return _invoke_azure_openai(prompt, config)
+    elif engine == "local":
+        return _invoke_local(prompt, config)
+    else:
+        raise UnsupportedProviderError(f"Unsupported intelligence engine: {engine}")
+
+
+def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
+    """Invoke OpenAI provider."""
+    try:
+        import openai
+    except ImportError:
+        raise IntelligenceError("OpenAI package not installed. Run: pip install openai")
+
+    model = config.get("model", "gpt-4")
+    api_key = config.get("api_key") or os.getenv("OPENAI_API_KEY")
+    base_url = config.get("endpoint", "https://api.openai.com/v1")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 150)
+
+    if not api_key:
+        raise ConfigurationError("OpenAI API key not found in config or OPENAI_API_KEY environment variable")
+
+    try:
+        client = openai.OpenAI(api_key=api_key, base_url=base_url)
+        response = client.chat.completions.create(
+            model=model,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=temperature,
+            max_tokens=max_tokens
+        )
+
+        content = response.choices[0].message.content
+        if content is None:
+            raise IntelligenceError("OpenAI returned empty response")
+        return content
+
+    except Exception as e:
+        log.error(f"OpenAI API error: {e}")
+        raise IntelligenceError(f"OpenAI API error: {e}")
+
+
+def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
+    """Invoke Anthropic (Claude) provider."""
+    try:
+        import anthropic
+    except ImportError:
+        raise IntelligenceError("Anthropic package not installed. Run: pip install anthropic")
+
+    model = config.get("model", "claude-3-haiku-20240307")
+    api_key = config.get("api_key") or os.getenv("ANTHROPIC_API_KEY")
+    base_url = config.get("endpoint", "https://api.anthropic.com")
+    max_tokens = config.get("max_tokens", 150)
+    temperature = config.get("temperature", 0.7)
+
+    if not api_key:
+        raise ConfigurationError("Anthropic API key not found in config or ANTHROPIC_API_KEY environment variable")
+
+    try:
+        client = anthropic.Anthropic(api_key=api_key, base_url=base_url)
+        response = client.messages.create(
+            model=model,
+            max_tokens=max_tokens,
+            temperature=temperature,
+            messages=[{"role": "user", "content": prompt}]
+        )
+
+        if not response.content or len(response.content) == 0:
+            raise IntelligenceError("Anthropic returned empty response")
+
+        # Anthropic returns a list of content blocks
+        return response.content[0].text
+
+    except Exception as e:
+        log.error(f"Anthropic API error: {e}")
+        raise IntelligenceError(f"Anthropic API error: {e}")
+
+
+def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
+    """Invoke Azure OpenAI provider."""
+    try:
+        import openai
+    except ImportError:
+        raise IntelligenceError("OpenAI package not installed. Run: pip install openai")
+
+    model = config.get("model", "gpt-4")
+    api_key = config.get("api_key") or os.getenv("AZURE_OPENAI_API_KEY")
+    endpoint = config.get("endpoint") or os.getenv("AZURE_OPENAI_ENDPOINT")
+    api_version = config.get("api_version", "2024-02-01")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 150)
+
+    if not api_key:
+        raise ConfigurationError("Azure OpenAI API key not found in config or AZURE_OPENAI_API_KEY environment variable")
+
+    if not endpoint:
+        raise ConfigurationError("Azure OpenAI endpoint not found in config or AZURE_OPENAI_ENDPOINT environment variable")
+
+    try:
+        client = openai.AzureOpenAI(
+            api_key=api_key,
+            azure_endpoint=endpoint,
+            api_version=api_version
+        )
+
+        response = client.chat.completions.create(
+            model=model,
+            messages=[{"role": "user", "content": prompt}],
+            temperature=temperature,
+            max_tokens=max_tokens
+        )
+
+        content = response.choices[0].message.content
+        if content is None:
+            raise IntelligenceError("Azure OpenAI returned empty response")
+        return content
+
+    except Exception as e:
+        log.error(f"Azure OpenAI API error: {e}")
+        raise IntelligenceError(f"Azure OpenAI API error: {e}")
+
+
+def _invoke_local(prompt: str, config: Dict[str, Any]) -> str:
+    """Invoke local LLM provider (e.g., Ollama, local API)."""
+    import requests
+
+    endpoint = config.get("endpoint", "http://localhost:11434/api/generate")
+    model = config.get("model", "llama2")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 150)
+
+    try:
+        # Format for Ollama API
+        if "ollama" in endpoint or ":11434" in endpoint:
+            payload = {
+                "model": model,
+                "prompt": prompt,
+                "stream": False,
+                "options": {
+                    "temperature": temperature,
+                    "num_predict": max_tokens
+                }
+            }
+        else:
+            # Generic local API format
+            payload = {
+                "model": model,
+                "prompt": prompt,
+                "temperature": temperature,
+                "max_tokens": max_tokens
+            }
+
+        response = requests.post(endpoint, json=payload, timeout=30)
+        response.raise_for_status()
+
+        result = response.json()
+
+        # Handle different response formats
+        if "response" in result:
+            return result["response"]  # Ollama format
+        elif "text" in result:
+            return result["text"]  # Generic format
+        elif "choices" in result and len(result["choices"]) > 0:
+            return result["choices"][0].get("text", "")  # OpenAI-compatible format
+        else:
+            raise IntelligenceError(f"Unexpected response format from local provider: {result}")
+
+    except requests.RequestException as e:
+        log.error(f"Local provider request error: {e}")
+        raise IntelligenceError(f"Local provider request error: {e}")
+    except Exception as e:
+        log.error(f"Local provider error: {e}")
+        raise IntelligenceError(f"Local provider error: {e}")
+
+
+def get_supported_engines() -> list:
+    """Get list of supported intelligence engines."""
+    return ["openai", "anthropic", "azure", "local"]
+
+
+def validate_config(config: Dict[str, Any]) -> bool:
+    """
+    Validate intelligence configuration.
+
+    Args:
+        config: Configuration dictionary
+
+    Returns:
+        True if valid
+
+    Raises:
+        ConfigurationError: If configuration is invalid
+    """
+    if not isinstance(config, dict):
+        raise ConfigurationError("Configuration must be a dictionary")
+
+    engine = config.get("engine")
+    if not engine:
+        raise ConfigurationError("Missing 'engine' in configuration")
+
+    if engine.lower() not in get_supported_engines():
+        raise ConfigurationError(f"Unsupported engine: {engine}. Supported engines: {get_supported_engines()}")
+
+    # Engine-specific validation
+    engine = engine.lower()
+
+    if engine in ["openai", "azure"]:
+        if not config.get("api_key") and not os.getenv("OPENAI_API_KEY") and not os.getenv("AZURE_OPENAI_API_KEY"):
+            raise ConfigurationError(f"API key required for {engine} engine")
+
+    elif engine == "anthropic":
+        if not config.get("api_key") and not os.getenv("ANTHROPIC_API_KEY"):
+            raise ConfigurationError("API key required for Anthropic engine")
+
+    elif engine == "local":
+        if not config.get("endpoint"):
+            config["endpoint"] = "http://localhost:11434/api/generate"  # Default to Ollama
+
+    return True
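A short sketch (not part of the diff) of how this module is driven end to end; the `local` engine assumes an Ollama server on the default endpoint hard-coded above:

```python
# Sketch: validate a config, then invoke the "local" engine.
from dacp.intelligence import invoke_intelligence, validate_config, IntelligenceError

config = {"engine": "local", "model": "llama2", "max_tokens": 150}

validate_config(config)  # for "local", this also fills in the default Ollama endpoint
try:
    print(invoke_intelligence("Say hello in one sentence.", config))
except IntelligenceError as e:
    print(f"Provider call failed: {e}")
```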
dacp/llm.py
CHANGED
@@ -1,19 +1,32 @@
+"""
+Legacy LLM module - Provides backward compatibility for call_llm function.
+Uses the new intelligence module under the hood.
+"""
+
 import os
-import
+from .intelligence import invoke_intelligence


 def call_llm(prompt: str, model: str = "gpt-4") -> str:
-[lines 6-19 removed; content not shown in this diff view]
+    """
+    Legacy function for calling LLMs.
+    Maintained for backward compatibility.
+
+    Args:
+        prompt: The input prompt
+        model: The model to use (defaults to gpt-4)
+
+    Returns:
+        Response from the LLM
+    """
+    # Create OpenAI config for backward compatibility
+    config = {
+        "engine": "openai",
+        "model": model,
+        "api_key": os.getenv("OPENAI_API_KEY"),
+        "endpoint": "https://api.openai.com/v1",
+        "temperature": 0.7,
+        "max_tokens": 150
+    }
+
+    return invoke_intelligence(prompt, config)
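Because `call_llm` now just assembles an OpenAI config and delegates, the legacy call and an explicit `invoke_intelligence` call are interchangeable. A sketch (not part of the diff):

```python
# Sketch: legacy entry point vs. the equivalent explicit config.
import dacp

legacy = dacp.call_llm("Summarize DACP in one line.")   # api_key read from OPENAI_API_KEY
explicit = dacp.invoke_intelligence(
    "Summarize DACP in one line.",
    {"engine": "openai", "model": "gpt-4"},             # same fallback to OPENAI_API_KEY
)
```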
dacp/main.py
ADDED
@@ -0,0 +1,15 @@
+from dacp.orchestrator import Orchestrator
+
+def main():
+    orchestrator = Orchestrator()
+
+    # Agent registers itself with the orchestrator
+    hello_agent = HelloWorldAgent("hello_agent", orchestrator)
+
+    # Orchestrator sends a message to the agent and prints the response
+    input_message = {"name": "Alice"}
+    response = orchestrator.call_agent("hello_agent", input_message)
+    print("Orchestrator received:", response)
+
+if __name__ == "__main__":
+    main()
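As shipped, this script cannot run: `HelloWorldAgent` is never defined or imported, and `Orchestrator` has no `call_agent` method (its routing entry point is `send_message`). A minimal sketch (not part of the wheel) of a working equivalent against the published 0.3.0 API:

```python
# Sketch: a runnable equivalent of main.py; HelloWorldAgent is a hypothetical stand-in.
from dacp.orchestrator import Orchestrator


class HelloWorldAgent:
    """Stand-in for the class main.py references but never defines."""

    def __init__(self, agent_id: str, orchestrator: Orchestrator):
        orchestrator.register_agent(agent_id, self)  # register on construction

    def handle_message(self, message):
        return {"response": f"Hello, {message.get('name', 'World')}!"}


def main():
    orchestrator = Orchestrator()
    HelloWorldAgent("hello_agent", orchestrator)
    # send_message is the actual routing method; call_agent does not exist
    response = orchestrator.send_message("hello_agent", {"name": "Alice"})
    print("Orchestrator received:", response)


if __name__ == "__main__":
    main()
```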
dacp/orchestrator.py
ADDED
@@ -0,0 +1,260 @@
+"""
+DACP Orchestrator - Manages agent registration and message routing.
+"""
+
+import logging
+from typing import Dict, Any, Optional, List, Callable
+import uuid
+import json
+from .protocol import (
+    parse_agent_response,
+    is_tool_request,
+    get_tool_request,
+    wrap_tool_result,
+    is_final_response,
+    get_final_response,
+)
+from .tools import run_tool
+
+log = logging.getLogger(__name__)
+
+
+class Agent:
+    """Base agent interface that all agents should implement."""
+
+    def handle_message(self, message: Dict[str, Any]) -> Dict[str, Any]:
+        """Handle incoming messages from the orchestrator."""
+        raise NotImplementedError("Agents must implement handle_message method")
+
+
+class Orchestrator:
+    """
+    Central orchestrator for managing agents and routing messages.
+    Handles agent registration, message routing, and tool execution.
+    """
+
+    def __init__(self):
+        self.agents: Dict[str, Any] = {}
+        self.conversation_history: List[Dict[str, Any]] = []
+        self.session_id = str(uuid.uuid4())
+
+    def register_agent(self, agent_id: str, agent: Any) -> None:
+        """
+        Register an agent with the orchestrator.
+
+        Args:
+            agent_id: Unique identifier for the agent
+            agent: Agent instance that implements handle_message method
+        """
+        if not hasattr(agent, 'handle_message'):
+            raise ValueError(f"Agent {agent_id} must implement handle_message method")
+
+        self.agents[agent_id] = agent
+        log.info(f"Registered agent: {agent_id}")
+
+    def unregister_agent(self, agent_id: str) -> bool:
+        """
+        Unregister an agent from the orchestrator.
+
+        Args:
+            agent_id: ID of the agent to unregister
+
+        Returns:
+            True if agent was found and removed, False otherwise
+        """
+        if agent_id in self.agents:
+            del self.agents[agent_id]
+            log.info(f"Unregistered agent: {agent_id}")
+            return True
+        return False
+
+    def get_agent(self, agent_id: str) -> Optional[Any]:
+        """Get an agent by ID."""
+        return self.agents.get(agent_id)
+
+    def list_agents(self) -> List[str]:
+        """Get list of registered agent IDs."""
+        return list(self.agents.keys())
+
+    def send_message(self, agent_id: str, message: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Send a message to a specific agent.
+
+        Args:
+            agent_id: ID of the target agent
+            message: Message to send
+
+        Returns:
+            Response from the agent
+
+        Raises:
+            ValueError: If agent_id is not found
+        """
+        if agent_id not in self.agents:
+            raise ValueError(f"Agent {agent_id} not found")
+
+        agent = self.agents[agent_id]
+
+        # Add metadata to message
+        enriched_message = {
+            "session_id": self.session_id,
+            "timestamp": self._get_timestamp(),
+            **message
+        }
+
+        try:
+            # Send message to agent
+            response = agent.handle_message(enriched_message)
+
+            # Log the interaction
+            self.conversation_history.append({
+                "type": "message",
+                "agent_id": agent_id,
+                "message": enriched_message,
+                "response": response,
+                "timestamp": self._get_timestamp()
+            })
+
+            return response
+
+        except Exception as e:
+            error_response = {
+                "error": f"Agent {agent_id} failed to handle message: {str(e)}",
+                "agent_id": agent_id
+            }
+            log.error(f"Error sending message to agent {agent_id}: {e}")
+            return error_response
+
+    def broadcast_message(self, message: Dict[str, Any], exclude_agents: List[str] = None) -> Dict[str, Dict[str, Any]]:
+        """
+        Broadcast a message to all registered agents.
+
+        Args:
+            message: Message to broadcast
+            exclude_agents: List of agent IDs to exclude from broadcast
+
+        Returns:
+            Dictionary mapping agent_id to response
+        """
+        exclude_agents = exclude_agents or []
+        responses = {}
+
+        for agent_id in self.agents:
+            if agent_id not in exclude_agents:
+                try:
+                    responses[agent_id] = self.send_message(agent_id, message)
+                except Exception as e:
+                    responses[agent_id] = {"error": str(e)}
+
+        return responses
+
+    def handle_tool_request(self, tool_name: str, args: Dict[str, Any]) -> Dict[str, Any]:
+        """
+        Handle a tool execution request.
+
+        Args:
+            tool_name: Name of the tool to execute
+            args: Arguments for the tool
+
+        Returns:
+            Tool execution result wrapped in protocol format
+        """
+        try:
+            result = run_tool(tool_name, args)
+            return wrap_tool_result(tool_name, result)
+        except Exception as e:
+            error_result = {
+                "success": False,
+                "error": str(e),
+                "tool_name": tool_name,
+                "args": args
+            }
+            return wrap_tool_result(tool_name, error_result)
+
+    def process_agent_response(self, agent_id: str, response: Any) -> Dict[str, Any]:
+        """
+        Process an agent response, handling tool requests and final responses.
+
+        Args:
+            agent_id: ID of the agent that sent the response
+            response: Agent response (string or dict)
+
+        Returns:
+            Processed response
+        """
+        try:
+            # Parse the agent response
+            parsed_response = parse_agent_response(response)
+
+            # Check if it's a tool request
+            if is_tool_request(parsed_response):
+                tool_name, args = get_tool_request(parsed_response)
+                log.info(f"Agent {agent_id} requested tool: {tool_name}")
+
+                # Execute the tool
+                tool_result = self.handle_tool_request(tool_name, args)
+
+                # Log tool execution
+                self.conversation_history.append({
+                    "type": "tool_execution",
+                    "agent_id": agent_id,
+                    "tool_name": tool_name,
+                    "args": args,
+                    "result": tool_result,
+                    "timestamp": self._get_timestamp()
+                })
+
+                return tool_result
+
+            # Check if it's a final response
+            elif is_final_response(parsed_response):
+                final_response = get_final_response(parsed_response)
+                log.info(f"Agent {agent_id} sent final response")
+                return final_response
+
+            else:
+                # Return the parsed response as-is
+                return parsed_response
+
+        except Exception as e:
+            log.error(f"Error processing agent response from {agent_id}: {e}")
+            return {
+                "error": f"Failed to process response: {str(e)}",
+                "original_response": response
+            }
+
+    def get_conversation_history(self, agent_id: str = None) -> List[Dict[str, Any]]:
+        """
+        Get conversation history, optionally filtered by agent.
+
+        Args:
+            agent_id: Optional agent ID to filter by
+
+        Returns:
+            List of conversation entries
+        """
+        if agent_id:
+            return [
+                entry for entry in self.conversation_history
+                if entry.get("agent_id") == agent_id
+            ]
+        return self.conversation_history.copy()
+
+    def clear_history(self) -> None:
+        """Clear conversation history."""
+        self.conversation_history.clear()
+        log.info("Conversation history cleared")
+
+    def get_session_info(self) -> Dict[str, Any]:
+        """Get current session information."""
+        return {
+            "session_id": self.session_id,
+            "registered_agents": self.list_agents(),
+            "conversation_length": len(self.conversation_history),
+            "timestamp": self._get_timestamp()
+        }
+
+    def _get_timestamp(self) -> str:
+        """Get current timestamp in ISO format."""
+        from datetime import datetime
+        return datetime.utcnow().isoformat() + "Z"
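The loop the orchestrator is built around: an agent's reply either carries a final answer or names a tool, and `process_agent_response` runs the tool through the shared registry and logs it. A sketch (not part of the diff), using the `tool_request` shape the 0.3.0 README documents:

```python
# Sketch: one tool-request round trip through the orchestrator.
from dacp.orchestrator import Agent, Orchestrator


class FileAgent(Agent):
    def handle_message(self, message):
        # Reply shape documented in the 0.3.0 README for tool-using agents
        return {
            "tool_request": {
                "name": "file_writer",
                "args": {"path": "./output/note.txt", "content": "hello"},
            }
        }


orch = Orchestrator()
orch.register_agent("file-agent", FileAgent())
reply = orch.send_message("file-agent", {"task": "write_file"})
result = orch.process_agent_response("file-agent", reply)  # executes file_writer
print(result)                   # wrapped tool result
print(orch.get_session_info())  # session id, agents, history length
```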
dacp/tools.py
CHANGED
@@ -1,4 +1,5 @@
 from typing import Dict, Any, Callable
+from pathlib import Path

 TOOL_REGISTRY: Dict[str, Callable[..., Dict[str, Any]]] = {}

@@ -12,19 +13,43 @@ def run_tool(tool_id: str, args: Dict[str, Any]) -> Dict[str, Any]:
     """Run a registered tool with the given arguments."""
     if tool_id not in TOOL_REGISTRY:
         raise ValueError(f"Unknown tool: {tool_id}")
+
     tool_func = TOOL_REGISTRY[tool_id]
     return tool_func(**args)


-[lines 19-29 removed; content not shown in this diff view]
+def file_writer(path: str, content: str) -> Dict[str, Any]:
+    """
+    Write content to a file, creating parent directories if they don't exist.
+
+    Args:
+        path: File path to write to
+        content: Content to write to the file
+
+    Returns:
+        Dict with success status and file path
+    """
+    try:
+        # Create parent directories if they don't exist
+        Path(path).parent.mkdir(parents=True, exist_ok=True)
+
+        # Write the content to the file
+        with open(path, "w", encoding="utf-8") as f:
+            f.write(content)
+
+        return {
+            "success": True,
+            "path": path,
+            "message": f"Successfully wrote {len(content)} characters to {path}",
+        }
+    except Exception as e:
+        return {
+            "success": False,
+            "path": path,
+            "error": str(e),
+            "message": f"Failed to write to {path}: {e}",
+        }
+
+
+# Register the built-in file_writer tool
 register_tool("file_writer", file_writer)
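`file_writer` goes through the same registry as any user-defined tool, so custom tools sit alongside it. A sketch (not part of the diff) with a hypothetical `word_count` tool:

```python
# Sketch: registering and running a custom tool next to the built-in file_writer.
from dacp.tools import register_tool, run_tool


def word_count(text: str) -> dict:
    # Tools return plain dicts, mirroring file_writer's contract
    return {"success": True, "words": len(text.split())}


register_tool("word_count", word_count)
print(run_tool("word_count", {"text": "hello dacp world"}))
# {'success': True, 'words': 3}
```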
dacp-0.3.0.dist-info/METADATA
ADDED
@@ -0,0 +1,369 @@
+Metadata-Version: 2.4
+Name: dacp
+Version: 0.3.0
+Summary: Declarative Agent Communication Protocol - A protocol for managing LLM/agent communications and tool function calls
+Author-email: Andrew Whitehouse <andrew.whitehouse@example.com>
+License: MIT
+Project-URL: Homepage, https://github.com/andrewwhitehouse/dacp
+Project-URL: Repository, https://github.com/andrewwhitehouse/dacp
+Project-URL: Documentation, https://github.com/andrewwhitehouse/dacp#readme
+Project-URL: Issues, https://github.com/andrewwhitehouse/dacp/issues
+Keywords: llm,agent,communication,protocol,ai,ml
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: requests>=2.25.0
+Requires-Dist: pyyaml>=5.4.0
+Provides-Extra: openai
+Requires-Dist: openai>=1.0.0; extra == "openai"
+Provides-Extra: anthropic
+Requires-Dist: anthropic>=0.18.0; extra == "anthropic"
+Provides-Extra: local
+Requires-Dist: requests>=2.25.0; extra == "local"
+Provides-Extra: all
+Requires-Dist: openai>=1.0.0; extra == "all"
+Requires-Dist: anthropic>=0.18.0; extra == "all"
+Provides-Extra: dev
+Requires-Dist: pytest>=7.0.0; extra == "dev"
+Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
+Requires-Dist: black>=22.0.0; extra == "dev"
+Requires-Dist: flake8>=4.0.0; extra == "dev"
+Requires-Dist: mypy>=1.0.0; extra == "dev"
+Requires-Dist: types-requests>=2.25.0; extra == "dev"
+Requires-Dist: types-PyYAML>=6.0.0; extra == "dev"
+Dynamic: license-file
+
+# DACP - Declarative Agent Communication Protocol
+
+A Python library for managing LLM/agent communications and tool function calls following the OAS Open Agent Specification.
+
+## Installation
+
+```bash
+pip install -e .
+```
+
+## Quick Start
+
+```python
+import dacp
+
+# Create an orchestrator to manage agents
+orchestrator = dacp.Orchestrator()
+
+# Create and register an agent
+class MyAgent:
+    def handle_message(self, message):
+        return {"response": f"Hello {message.get('name', 'World')}!"}
+
+agent = MyAgent()
+orchestrator.register_agent("my-agent", agent)
+
+# Send a message to the agent
+response = orchestrator.send_message("my-agent", {"name": "Alice"})
+print(response)  # {"response": "Hello Alice!"}
+
+# Use built-in tools
+result = dacp.file_writer("./output/greeting.txt", "Hello, World!")
+print(result["message"])  # "Successfully wrote 13 characters to ./output/greeting.txt"
+
+# Use intelligence providers (supports multiple LLM providers)
+intelligence_config = {
+    "engine": "anthropic",
+    "model": "claude-3-haiku-20240307",
+    "api_key": "your-api-key"  # or set ANTHROPIC_API_KEY env var
+}
+response = dacp.invoke_intelligence("What is the weather like today?", intelligence_config)
+
+# Or use the legacy call_llm function for OpenAI
+response = dacp.call_llm("What is the weather like today?")
+```
+
+## Features
+
+- **Agent Orchestration**: Central management of multiple agents with message routing
+- **Tool Registry**: Register and manage custom tools for LLM agents
+- **Built-in Tools**: Includes a `file_writer` tool that automatically creates parent directories
+- **LLM Integration**: Built-in support for OpenAI models (extensible)
+- **Protocol Parsing**: Parse and validate agent responses
+- **Tool Execution**: Safe execution of registered tools
+- **Conversation History**: Track and query agent interactions
+- **OAS Compliance**: Follows Open Agent Specification standards
+
+## API Reference
+
+### Orchestrator
+
+- `Orchestrator()`: Create a new orchestrator instance
+- `register_agent(agent_id: str, agent) -> None`: Register an agent
+- `unregister_agent(agent_id: str) -> bool`: Remove an agent
+- `send_message(agent_id: str, message: Dict) -> Dict`: Send message to specific agent
+- `broadcast_message(message: Dict, exclude_agents: List[str] = None) -> Dict`: Send message to all agents
+- `get_conversation_history(agent_id: str = None) -> List[Dict]`: Get conversation history
+- `clear_history() -> None`: Clear conversation history
+- `get_session_info() -> Dict`: Get current session information
+
+### Tools
+
+- `register_tool(tool_id: str, func)`: Register a new tool
+- `run_tool(tool_id: str, args: Dict) -> dict`: Execute a registered tool
+- `TOOL_REGISTRY`: Access the current tool registry
+- `file_writer(path: str, content: str) -> dict`: Write content to file, creating directories automatically
+
+### Intelligence (Multi-Provider LLM Support)
+
+- `invoke_intelligence(prompt: str, config: dict) -> str`: Call any supported LLM provider
+- `validate_config(config: dict) -> bool`: Validate intelligence configuration
+- `get_supported_engines() -> list`: Get list of supported engines
+
+### LLM (Legacy)
+
+- `call_llm(prompt: str, model: str = "gpt-4") -> str`: Call OpenAI (legacy function)
+
+### Protocol
+
+- `parse_agent_response(response: str | dict) -> dict`: Parse agent response
+- `is_tool_request(msg: dict) -> bool`: Check if message is a tool request
+- `get_tool_request(msg: dict) -> tuple[str, dict]`: Extract tool request details
+- `wrap_tool_result(name: str, result: dict) -> dict`: Wrap tool result for agent
+- `is_final_response(msg: dict) -> bool`: Check if message is a final response
+- `get_final_response(msg: dict) -> dict`: Extract final response
+
+## Agent Development
+
+### Creating an Agent
+
+Agents must implement a `handle_message` method:
+
+```python
+import dacp
+
+class GreetingAgent:
+    def handle_message(self, message):
+        name = message.get("name", "World")
+        task = message.get("task")
+
+        if task == "greet":
+            return {"response": f"Hello, {name}!"}
+        elif task == "farewell":
+            return {"response": f"Goodbye, {name}!"}
+        else:
+            return {"error": f"Unknown task: {task}"}
+
+# Register the agent
+orchestrator = dacp.Orchestrator()
+agent = GreetingAgent()
+orchestrator.register_agent("greeter", agent)
+
+# Use the agent
+response = orchestrator.send_message("greeter", {
+    "task": "greet",
+    "name": "Alice"
+})
+print(response)  # {"response": "Hello, Alice!"}
+```
+
+### Agent Base Class
+
+You can also inherit from the `Agent` base class:
+
+```python
+import dacp
+
+class MyAgent(dacp.Agent):
+    def handle_message(self, message):
+        return {"processed": message}
+```
+
+### Tool Requests from Agents
+
+Agents can request tool execution by returning properly formatted responses:
+
+```python
+class ToolUsingAgent:
+    def handle_message(self, message):
+        if message.get("task") == "write_file":
+            return {
+                "tool_request": {
+                    "name": "file_writer",
+                    "args": {
+                        "path": "./output/agent_file.txt",
+                        "content": "Hello from agent!"
+                    }
+                }
+            }
+        return {"response": "Task completed"}
+
+# The orchestrator will automatically execute the tool and return results
+orchestrator = dacp.Orchestrator()
+agent = ToolUsingAgent()
+orchestrator.register_agent("file-agent", agent)
+
+response = orchestrator.send_message("file-agent", {"task": "write_file"})
+# Tool will be executed automatically
+```
+
+## Intelligence Configuration
+
+DACP supports multiple LLM providers through the `invoke_intelligence` function. Configure different providers using a configuration dictionary:
+
+### OpenAI
+
+```python
+import dacp
+
+openai_config = {
+    "engine": "openai",
+    "model": "gpt-4",  # or "gpt-3.5-turbo", "gpt-4-turbo", etc.
+    "api_key": "your-openai-key",  # or set OPENAI_API_KEY env var
+    "endpoint": "https://api.openai.com/v1",  # optional, uses default
+    "temperature": 0.7,  # optional, default 0.7
+    "max_tokens": 150  # optional, default 150
+}
+
+response = dacp.invoke_intelligence("Explain quantum computing", openai_config)
+```
+
+### Anthropic (Claude)
+
+```python
+anthropic_config = {
+    "engine": "anthropic",
+    "model": "claude-3-haiku-20240307",  # or other Claude models
+    "api_key": "your-anthropic-key",  # or set ANTHROPIC_API_KEY env var
+    "endpoint": "https://api.anthropic.com",  # optional, uses default
+    "temperature": 0.7,
+    "max_tokens": 150
+}
+
+response = dacp.invoke_intelligence("Write a poem about AI", anthropic_config)
+```
+
+### Azure OpenAI
+
+```python
+azure_config = {
+    "engine": "azure",
+    "model": "gpt-4",  # Your deployed model name
+    "api_key": "your-azure-key",  # or set AZURE_OPENAI_API_KEY env var
+    "endpoint": "https://your-resource.openai.azure.com",  # or set AZURE_OPENAI_ENDPOINT env var
+    "api_version": "2024-02-01"  # optional, default provided
+}
+
+response = dacp.invoke_intelligence("Analyze this data", azure_config)
+```
+
+### Local LLMs (Ollama, etc.)
+
+```python
+# For Ollama (default local setup)
+local_config = {
+    "engine": "local",
+    "model": "llama2",  # or any model available in Ollama
+    "endpoint": "http://localhost:11434/api/generate",  # Ollama default
+    "temperature": 0.7,
+    "max_tokens": 150
+}
+
+# For custom local APIs
+custom_local_config = {
+    "engine": "local",
+    "model": "custom-model",
+    "endpoint": "http://localhost:8080/generate",  # Your API endpoint
+    "temperature": 0.7,
+    "max_tokens": 150
+}
+
+response = dacp.invoke_intelligence("Tell me a story", local_config)
+```
+
+### Configuration from OAS YAML
+
+You can load configuration from OAS (Open Agent Specification) YAML files:
+
+```python
+import yaml
+import dacp
+
+# Load config from YAML file
+with open('agent_config.yaml', 'r') as f:
+    config = yaml.safe_load(f)
+
+intelligence_config = config.get('intelligence', {})
+response = dacp.invoke_intelligence("Hello, AI!", intelligence_config)
+```
+
+### Installation for Different Providers
+
+Install optional dependencies for the providers you need:
+
+```bash
+# For OpenAI
+pip install dacp[openai]
+
+# For Anthropic
+pip install dacp[anthropic]
+
+# For all providers
+pip install dacp[all]
+
+# For local providers (requests is already included in base install)
+pip install dacp[local]
+```
+
+## Built-in Tools
+
+### file_writer
+
+The `file_writer` tool automatically creates parent directories and writes content to files:

+```python
+import dacp
+
+# This will create the ./output/ directory if it doesn't exist
+result = dacp.file_writer("./output/file.txt", "Hello, World!")
+
+if result["success"]:
+    print(f"File written: {result['path']}")
+    print(f"Message: {result['message']}")
+else:
+    print(f"Error: {result['error']}")
+```
+
+**Features:**
+- ✅ Automatically creates parent directories
+- ✅ Handles Unicode content properly
+- ✅ Returns detailed success/error information
+- ✅ Safe error handling
+
+## Development
+
+```bash
+# Install development dependencies
+pip install -e .[dev]
+
+# Run tests
+pytest
+
+# Format code
+black .
+
+# Lint code
+flake8
+```
+
+## License
+
+MIT License
dacp-0.3.0.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+dacp/__init__.py,sha256=-EFiHyOWg8IcG2FI-Hmd9wrHicGi8qAMv0cPwz0oCiU,833
+dacp/exceptions.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dacp/intelligence.py,sha256=SGLaq6dlJmeDqP0ZgT6bR_4WahNf-K7qWBaTG3yqBMI,9487
+dacp/llm.py,sha256=JxHqM-aIm9pAMcVHQevbJGxrlBH4uV1ngRQdvyp9L3A,827
+dacp/main.py,sha256=ZcJLymC9S5A4iO4yV7X178RLlhDrDuAwirdevUD5Yn0,470
+dacp/orchestrator.py,sha256=PIur8Kts-vtUchmo7E8B59ZLm6s7C7yQ0ScK9E5FXhE,8756
+dacp/protocol.py,sha256=DVhLTdyDVlAu8ETSEX8trPeycKfMeirHwcWQ8-BY7eA,1026
+dacp/tools.py,sha256=FLgZa5Brni9H3Hyn336k3DGoVxIa_1rFM4WpIRXzVbc,1569
+dacp/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+dacp-0.3.0.dist-info/licenses/LICENSE,sha256=tb5kgUYRypHqAy8wlrJUBSYI5l1SBmawSYHmCC-MVW0,1074
+dacp-0.3.0.dist-info/METADATA,sha256=J9bKw2rxID--1fqZ9Z33lSs2SKTmat72zVLj-DUirU0,11220
+dacp-0.3.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+dacp-0.3.0.dist-info/top_level.txt,sha256=Qxy0cy5jl7ttTQoGFlY9LXB6CbSvsekJ2y0P8I7L1zA,5
+dacp-0.3.0.dist-info/RECORD,,
dacp-0.1.0.dist-info/METADATA
DELETED
@@ -1,114 +0,0 @@
|
|
1
|
-
Metadata-Version: 2.4
|
2
|
-
Name: dacp
|
3
|
-
Version: 0.1.0
|
4
|
-
Summary: DACP - Declarative Agent Communication Protocol for LLM/agent communications and tool function calls
|
5
|
-
Author-email: Your Name <your.email@example.com>
|
6
|
-
License: MIT
|
7
|
-
Project-URL: Homepage, https://github.com/yourusername/dacp
|
8
|
-
Project-URL: Repository, https://github.com/yourusername/dacp
|
9
|
-
Project-URL: Issues, https://github.com/yourusername/dacp/issues
|
10
|
-
Keywords: llm,agent,tools,openai,communication
|
11
|
-
Classifier: Development Status :: 3 - Alpha
|
12
|
-
Classifier: Intended Audience :: Developers
|
13
|
-
Classifier: License :: OSI Approved :: MIT License
|
14
|
-
Classifier: Programming Language :: Python :: 3
|
15
|
-
Classifier: Programming Language :: Python :: 3.8
|
16
|
-
Classifier: Programming Language :: Python :: 3.9
|
17
|
-
Classifier: Programming Language :: Python :: 3.10
|
18
|
-
Classifier: Programming Language :: Python :: 3.11
|
19
|
-
Classifier: Programming Language :: Python :: 3.12
|
20
|
-
Requires-Python: >=3.8
|
21
|
-
Description-Content-Type: text/markdown
|
22
|
-
License-File: LICENSE
|
23
|
-
Requires-Dist: openai>=1.0.0
|
24
|
-
Provides-Extra: dev
|
25
|
-
Requires-Dist: pytest>=7.0.0; extra == "dev"
|
26
|
-
Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
|
27
|
-
Requires-Dist: black>=22.0.0; extra == "dev"
|
28
|
-
Requires-Dist: flake8>=5.0.0; extra == "dev"
|
29
|
-
Requires-Dist: mypy>=1.0.0; extra == "dev"
|
30
|
-
Requires-Dist: types-requests>=2.28.0; extra == "dev"
|
31
|
-
Dynamic: license-file
|
32
|
-
|
33
|
-
# DACP - Delcarative Agent Communication Protocol
|
34
|
-
|
35
|
-
A Python library for managing LLM/agent communications and tool function calls following the OAS Open Agent Specification.
|
36
|
-
|
37
|
-
## Installation
|
38
|
-
|
39
|
-
```bash
|
40
|
-
pip install -e .
|
41
|
-
```
|
42
|
-
|
43
|
-
## Quick Start
|
44
|
-
|
45
|
-
```python
|
46
|
-
import dacp
|
47
|
-
|
48
|
-
# Register a custom tool
|
49
|
-
def my_custom_tool(param1: str, param2: int) -> dict:
|
50
|
-
return {"result": f"Processed {param1} with {param2}"}
|
51
|
-
|
52
|
-
dacp.register_tool("my_custom_tool", my_custom_tool)
|
53
|
-
|
54
|
-
# Call an LLM
|
55
|
-
response = dacp.call_llm("What is the weather like today?")
|
56
|
-
|
57
|
-
# Parse agent response
|
58
|
-
parsed = dacp.parse_agent_response(response)
|
59
|
-
|
60
|
-
# Check if it's a tool request
|
61
|
-
if dacp.is_tool_request(parsed):
|
62
|
-
tool_name, args = dacp.get_tool_request(parsed)
|
63
|
-
result = dacp.run_tool(tool_name, args)
|
64
|
-
tool_response = dacp.wrap_tool_result(tool_name, result)
|
65
|
-
```
|
66
|
-
|
67
|
-
## Features
|
68
|
-
|
69
|
-
- **Tool Registry**: Register and manage custom tools for LLM agents
|
70
|
-
- **LLM Integration**: Built-in support for OpenAI models (extensible)
|
71
|
-
- **Protocol Parsing**: Parse and validate agent responses
|
72
|
-
- **Tool Execution**: Safe execution of registered tools
|
73
|
-
- **OAS Compliance**: Follows Open Agent Specification standards
|
74
|
-
|
75
|
-
## API Reference
|
76
|
-
|
77
|
-
### Tools
|
78
|
-
|
79
|
-
- `register_tool(tool_id: str, func)`: Register a new tool
|
80
|
-
- `run_tool(tool_id: str, args: Dict) -> dict`: Execute a registered tool
|
81
|
-
- `TOOL_REGISTRY`: Access the current tool registry
|
82
|
-
|
83
|
-
### LLM
|
84
|
-
|
85
|
-
- `call_llm(prompt: str, model: str = "gpt-4") -> str`: Call an LLM with a prompt
|
86
|
-
|
87
|
-
### Protocol
|
88
|
-
|
89
|
-
- `parse_agent_response(response: str | dict) -> dict`: Parse agent response
|
90
|
-
- `is_tool_request(msg: dict) -> bool`: Check if message is a tool request
|
91
|
-
- `get_tool_request(msg: dict) -> tuple[str, dict]`: Extract tool request details
|
92
|
-
- `wrap_tool_result(name: str, result: dict) -> dict`: Wrap tool result for agent
|
93
|
-
- `is_final_response(msg: dict) -> bool`: Check if message is a final response
|
94
|
-
- `get_final_response(msg: dict) -> dict`: Extract final response
|
95
|
-
|
96
|
-
## Development
|
97
|
-
|
98
|
-
```bash
|
99
|
-
# Install development dependencies
|
100
|
-
pip install -e .[dev]
|
101
|
-
|
102
|
-
# Run tests
|
103
|
-
pytest
|
104
|
-
|
105
|
-
# Format code
|
106
|
-
black .
|
107
|
-
|
108
|
-
# Lint code
|
109
|
-
flake8
|
110
|
-
```
|
111
|
-
|
112
|
-
## License
|
113
|
-
|
114
|
-
MIT License
|
dacp-0.1.0.dist-info/RECORD
DELETED
@@ -1,11 +0,0 @@
-dacp/__init__.py,sha256=SY-MnntwhtBTXfVVSSEtK6rLBrlOMkWyXhfPvpeJSDo,690
-dacp/exceptions.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dacp/llm.py,sha256=doz7mHby2pLf0khX-AVcNo9DE1yZCGLwAzaEUEfTeo0,581
-dacp/protocol.py,sha256=DVhLTdyDVlAu8ETSEX8trPeycKfMeirHwcWQ8-BY7eA,1026
-dacp/tools.py,sha256=fkRAnVPgbsOxfphYgevDmrxfIkSF3lliVWAMLThpY68,971
-dacp/types.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-dacp-0.1.0.dist-info/licenses/LICENSE,sha256=tb5kgUYRypHqAy8wlrJUBSYI5l1SBmawSYHmCC-MVW0,1074
-dacp-0.1.0.dist-info/METADATA,sha256=Dwm_o_IWbwSdr3QeRFTccyGzwAwNRRQ20eG8g0BX12I,3447
-dacp-0.1.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-dacp-0.1.0.dist-info/top_level.txt,sha256=Qxy0cy5jl7ttTQoGFlY9LXB6CbSvsekJ2y0P8I7L1zA,5
-dacp-0.1.0.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|