dacp 0.1.0__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as published.
- dacp/__init__.py +40 -12
- dacp/intelligence.py +378 -0
- dacp/llm.py +28 -15
- dacp/logging_config.py +128 -0
- dacp/main.py +15 -0
- dacp/orchestrator.py +248 -0
- dacp/tools.py +66 -10
- dacp-0.3.1.dist-info/METADATA +464 -0
- dacp-0.3.1.dist-info/RECORD +15 -0
- dacp-0.1.0.dist-info/METADATA +0 -114
- dacp-0.1.0.dist-info/RECORD +0 -11
- {dacp-0.1.0.dist-info → dacp-0.3.1.dist-info}/WHEEL +0 -0
- {dacp-0.1.0.dist-info → dacp-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {dacp-0.1.0.dist-info → dacp-0.3.1.dist-info}/top_level.txt +0 -0
dacp/__init__.py
CHANGED

```diff
@@ -1,12 +1,8 @@
 """
-DACP
-
-A Python library for managing LLM/agent communications and tool function calls
-following the OAS Open Agent Specification.
+DACP (Declarative Agent Communication Protocol)
+A protocol for managing LLM/agent communications and tool function calls.
 """
 
-from .tools import register_tool, run_tool, TOOL_REGISTRY
-from .llm import call_llm
 from .protocol import (
     parse_agent_response,
     is_tool_request,
@@ -15,17 +11,49 @@ from .protocol import (
     is_final_response,
     get_final_response,
 )
+from .tools import (
+    register_tool, run_tool, TOOL_REGISTRY, file_writer
+)
+from .llm import call_llm
+from .intelligence import invoke_intelligence
+from .orchestrator import Orchestrator, Agent
+from .logging_config import (
+    setup_dacp_logging,
+    enable_debug_logging,
+    enable_info_logging,
+    enable_quiet_logging,
+    set_dacp_log_level,
+    disable_dacp_logging,
+    enable_dacp_logging,
+)
+
+__version__ = "0.3.0"
 
-__version__ = "0.1.0"
 __all__ = [
-
-    "run_tool",
-    "TOOL_REGISTRY",
-    "call_llm",
+    # Protocol functions
     "parse_agent_response",
-    "is_tool_request",
+    "is_tool_request",
     "get_tool_request",
     "wrap_tool_result",
     "is_final_response",
     "get_final_response",
+    # Tool functions
+    "register_tool",
+    "run_tool",
+    "TOOL_REGISTRY",
+    "file_writer",
+    # LLM functions
+    "call_llm",
+    "invoke_intelligence",
+    # Agent orchestration
+    "Orchestrator",
+    "Agent",
+    # Logging configuration
+    "setup_dacp_logging",
+    "enable_debug_logging",
+    "enable_info_logging",
+    "enable_quiet_logging",
+    "set_dacp_log_level",
+    "disable_dacp_logging",
+    "enable_dacp_logging",
 ]
```
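Taken together, 0.3.x exposes orchestration, intelligence, and logging helpers from the top-level package. A minimal sketch of the new surface, using only names and signatures visible in this diff (an `OPENAI_API_KEY` environment variable is assumed for the LLM call):

```python
# Sketch of the 0.3.x top-level API; assumes OPENAI_API_KEY is set.
import dacp

dacp.enable_info_logging()          # emoji-style INFO logs (see logging_config.py below)
print(dacp.__version__)             # "0.3.0" in this module, although the wheel is tagged 0.3.1

reply = dacp.call_llm("Say hello")  # legacy shim, now backed by invoke_intelligence
print(reply)
```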
dacp/intelligence.py
ADDED

```python
"""
DACP Intelligence Module - Generic LLM provider interface.
"""

import os
import logging
from typing import Dict, Any, Optional

# Set up logger for this module
logger = logging.getLogger("dacp.intelligence")


class IntelligenceError(Exception):
    """Base exception for intelligence provider errors."""
    pass


class UnsupportedProviderError(IntelligenceError):
    """Raised when an unsupported intelligence provider is requested."""
    pass


class ConfigurationError(IntelligenceError):
    """Raised when intelligence configuration is invalid."""
    pass


def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> str:
    """
    Invoke an intelligence provider (LLM) with the given prompt and configuration.

    Args:
        prompt: The input prompt to send to the intelligence provider
        config: Configuration dictionary containing provider details

    Returns:
        Response string from the intelligence provider

    Raises:
        UnsupportedProviderError: If the provider is not supported
        ConfigurationError: If the configuration is invalid
        IntelligenceError: For other provider-specific errors
    """
    engine = config.get("engine")
    if not engine:
        logger.error("❌ Missing 'engine' in intelligence configuration")
        raise ConfigurationError("Missing 'engine' in intelligence configuration")

    engine = engine.lower()
    model = config.get("model", "default")

    logger.info(f"🧠 Invoking intelligence: engine='{engine}', model='{model}'")
    logger.debug(f"📋 Prompt length: {len(prompt)} characters")
    logger.debug(f"⚙️ Full config: {_sanitize_config_for_logging(config)}")

    import time
    start_time = time.time()

    try:
        if engine == "openai":
            result = _invoke_openai(prompt, config)
        elif engine == "anthropic":
            result = _invoke_anthropic(prompt, config)
        elif engine == "azure":
            result = _invoke_azure_openai(prompt, config)
        elif engine == "local":
            result = _invoke_local(prompt, config)
        else:
            logger.error(f"❌ Unsupported intelligence engine: {engine}")
            raise UnsupportedProviderError(f"Unsupported intelligence engine: {engine}")

        execution_time = time.time() - start_time
        logger.info(f"✅ Intelligence response received in {execution_time:.3f}s (length: {len(result)} chars)")
        logger.debug(f"📤 Response preview: {result[:100]}{'...' if len(result) > 100 else ''}")

        return result

    except (IntelligenceError, UnsupportedProviderError, ConfigurationError):
        # Re-raise our own exceptions without modification
        execution_time = time.time() - start_time
        logger.error(f"❌ Intelligence call failed after {execution_time:.3f}s")
        raise
    except Exception as e:
        execution_time = time.time() - start_time
        logger.error(f"❌ Unexpected intelligence error after {execution_time:.3f}s: {type(e).__name__}: {e}")
        raise IntelligenceError(f"Unexpected error: {e}")


def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
    """Invoke OpenAI provider."""
    logger.debug("🔵 Initializing OpenAI provider")

    try:
        import openai
        logger.debug("✅ OpenAI package imported successfully")
    except ImportError:
        logger.error("❌ OpenAI package not installed")
        raise IntelligenceError("OpenAI package not installed. Run: pip install openai")

    model = config.get("model", "gpt-4")
    api_key = config.get("api_key") or os.getenv("OPENAI_API_KEY")
    base_url = config.get("endpoint", "https://api.openai.com/v1")
    temperature = config.get("temperature", 0.7)
    max_tokens = config.get("max_tokens", 150)

    logger.debug(f"🔧 OpenAI config: model={model}, base_url={base_url}, temp={temperature}, max_tokens={max_tokens}")

    if not api_key:
        logger.error("❌ OpenAI API key not found")
        raise ConfigurationError("OpenAI API key not found in config or OPENAI_API_KEY environment variable")

    try:
        logger.debug("🔗 Creating OpenAI client")
        client = openai.OpenAI(api_key=api_key, base_url=base_url)

        logger.debug("📡 Sending request to OpenAI API")
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            max_tokens=max_tokens
        )

        content = response.choices[0].message.content
        if content is None:
            logger.error("❌ OpenAI returned empty response")
            raise IntelligenceError("OpenAI returned empty response")

        logger.debug(f"✅ OpenAI API call successful")
        return content

    except Exception as e:
        logger.error(f"❌ OpenAI API error: {type(e).__name__}: {e}")
        raise IntelligenceError(f"OpenAI API error: {e}")


def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
    """Invoke Anthropic (Claude) provider."""
    logger.debug("🟣 Initializing Anthropic provider")

    try:
        import anthropic
        logger.debug("✅ Anthropic package imported successfully")
    except ImportError:
        logger.error("❌ Anthropic package not installed")
        raise IntelligenceError("Anthropic package not installed. Run: pip install anthropic")

    model = config.get("model", "claude-3-haiku-20240307")
    api_key = config.get("api_key") or os.getenv("ANTHROPIC_API_KEY")
    base_url = config.get("endpoint", "https://api.anthropic.com")
    max_tokens = config.get("max_tokens", 150)
    temperature = config.get("temperature", 0.7)

    logger.debug(f"🔧 Anthropic config: model={model}, base_url={base_url}, temp={temperature}, max_tokens={max_tokens}")

    if not api_key:
        logger.error("❌ Anthropic API key not found")
        raise ConfigurationError("Anthropic API key not found in config or ANTHROPIC_API_KEY environment variable")

    try:
        logger.debug("🔗 Creating Anthropic client")
        client = anthropic.Anthropic(api_key=api_key, base_url=base_url)

        logger.debug("📡 Sending request to Anthropic API")
        response = client.messages.create(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            messages=[{"role": "user", "content": prompt}]
        )

        if not response.content or len(response.content) == 0:
            logger.error("❌ Anthropic returned empty response")
            raise IntelligenceError("Anthropic returned empty response")

        # Anthropic returns a list of content blocks
        result = response.content[0].text
        logger.debug(f"✅ Anthropic API call successful")
        return result

    except Exception as e:
        logger.error(f"❌ Anthropic API error: {type(e).__name__}: {e}")
        raise IntelligenceError(f"Anthropic API error: {e}")


def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
    """Invoke Azure OpenAI provider."""
    logger.debug("🔷 Initializing Azure OpenAI provider")

    try:
        import openai
        logger.debug("✅ OpenAI package imported successfully")
    except ImportError:
        logger.error("❌ OpenAI package not installed")
        raise IntelligenceError("OpenAI package not installed. Run: pip install openai")

    model = config.get("model", "gpt-4")
    api_key = config.get("api_key") or os.getenv("AZURE_OPENAI_API_KEY")
    endpoint = config.get("endpoint") or os.getenv("AZURE_OPENAI_ENDPOINT")
    api_version = config.get("api_version", "2024-02-01")
    temperature = config.get("temperature", 0.7)
    max_tokens = config.get("max_tokens", 150)

    logger.debug(f"🔧 Azure config: model={model}, endpoint={endpoint}, api_version={api_version}, temp={temperature}, max_tokens={max_tokens}")

    if not api_key:
        logger.error("❌ Azure OpenAI API key not found")
        raise ConfigurationError("Azure OpenAI API key not found in config or AZURE_OPENAI_API_KEY environment variable")

    if not endpoint:
        logger.error("❌ Azure OpenAI endpoint not found")
        raise ConfigurationError("Azure OpenAI endpoint not found in config or AZURE_OPENAI_ENDPOINT environment variable")

    try:
        logger.debug("🔗 Creating Azure OpenAI client")
        client = openai.AzureOpenAI(
            api_key=api_key,
            azure_endpoint=endpoint,
            api_version=api_version
        )

        logger.debug("📡 Sending request to Azure OpenAI API")
        response = client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            temperature=temperature,
            max_tokens=max_tokens
        )

        content = response.choices[0].message.content
        if content is None:
            logger.error("❌ Azure OpenAI returned empty response")
            raise IntelligenceError("Azure OpenAI returned empty response")

        logger.debug(f"✅ Azure OpenAI API call successful")
        return content

    except Exception as e:
        logger.error(f"❌ Azure OpenAI API error: {type(e).__name__}: {e}")
        raise IntelligenceError(f"Azure OpenAI API error: {e}")


def _invoke_local(prompt: str, config: Dict[str, Any]) -> str:
    """Invoke local LLM provider (e.g., Ollama, local API)."""
    import requests

    endpoint = config.get("endpoint", "http://localhost:11434/api/generate")
    model = config.get("model", "llama2")
    temperature = config.get("temperature", 0.7)
    max_tokens = config.get("max_tokens", 150)

    logger.debug(f"🟢 Initializing local provider")
    logger.debug(f"🔧 Local config: model={model}, endpoint={endpoint}, temp={temperature}, max_tokens={max_tokens}")

    try:
        # Format for Ollama API
        if "ollama" in endpoint or ":11434" in endpoint:
            logger.debug("📦 Using Ollama API format")
            payload = {
                "model": model,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": temperature,
                    "num_predict": max_tokens
                }
            }
        else:
            logger.debug("📦 Using generic local API format")
            payload = {
                "model": model,
                "prompt": prompt,
                "temperature": temperature,
                "max_tokens": max_tokens
            }

        logger.debug(f"📡 Sending request to local endpoint: {endpoint}")
        response = requests.post(endpoint, json=payload, timeout=30)
        response.raise_for_status()

        result = response.json()

        # Handle different response formats
        if "response" in result:
            response_text = result["response"]  # Ollama format
            logger.debug("✅ Local API call successful (Ollama format)")
        elif "text" in result:
            response_text = result["text"]  # Generic format
            logger.debug("✅ Local API call successful (generic format)")
        elif "choices" in result and len(result["choices"]) > 0:
            response_text = result["choices"][0].get("text", "")  # OpenAI-compatible format
            logger.debug("✅ Local API call successful (OpenAI-compatible format)")
        else:
            logger.error(f"❌ Unexpected response format from local provider: {result}")
            raise IntelligenceError(f"Unexpected response format from local provider: {result}")

        return response_text

    except requests.RequestException as e:
        logger.error(f"❌ Local provider request error: {type(e).__name__}: {e}")
        raise IntelligenceError(f"Local provider request error: {e}")
    except Exception as e:
        logger.error(f"❌ Local provider error: {type(e).__name__}: {e}")
        raise IntelligenceError(f"Local provider error: {e}")


def get_supported_engines() -> list:
    """Get list of supported intelligence engines."""
    return ["openai", "anthropic", "azure", "local"]


def validate_config(config: Dict[str, Any]) -> bool:
    """
    Validate intelligence configuration.

    Args:
        config: Configuration dictionary

    Returns:
        True if valid

    Raises:
        ConfigurationError: If configuration is invalid
    """
    logger.debug(f"🔍 Validating intelligence configuration")

    if not isinstance(config, dict):
        logger.error("❌ Configuration must be a dictionary")
        raise ConfigurationError("Configuration must be a dictionary")

    engine = config.get("engine")
    if not engine:
        logger.error("❌ Missing 'engine' in configuration")
        raise ConfigurationError("Missing 'engine' in configuration")

    if engine.lower() not in get_supported_engines():
        logger.error(f"❌ Unsupported engine: {engine}")
        raise ConfigurationError(f"Unsupported engine: {engine}. Supported engines: {get_supported_engines()}")

    # Engine-specific validation
    engine = engine.lower()
    logger.debug(f"🔧 Validating {engine} specific configuration")

    if engine in ["openai", "azure"]:
        if not config.get("api_key") and not os.getenv("OPENAI_API_KEY") and not os.getenv("AZURE_OPENAI_API_KEY"):
            logger.error(f"❌ API key required for {engine} engine")
            raise ConfigurationError(f"API key required for {engine} engine")

    elif engine == "anthropic":
        if not config.get("api_key") and not os.getenv("ANTHROPIC_API_KEY"):
            logger.error("❌ API key required for Anthropic engine")
            raise ConfigurationError("API key required for Anthropic engine")

    elif engine == "local":
        if not config.get("endpoint"):
            config["endpoint"] = "http://localhost:11434/api/generate"  # Default to Ollama
            logger.debug("🔧 Set default endpoint for local engine")

    logger.debug(f"✅ Configuration validation successful for {engine}")
    return True


def _sanitize_config_for_logging(config: Dict[str, Any]) -> Dict[str, Any]:
    """Sanitize config for logging by masking sensitive data."""
    sanitized = config.copy()

    # Mask sensitive fields
    sensitive_fields = ['api_key', 'password', 'token', 'secret']
    for field in sensitive_fields:
        if field in sanitized and sanitized[field]:
            # Show first 4 and last 4 characters, mask the rest
            value = str(sanitized[field])
            if len(value) > 8:
                sanitized[field] = f"{value[:4]}...{value[-4:]}"
            else:
                sanitized[field] = "***"

    return sanitized
```
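Everything in this module is driven by the config dict: `engine` selects the provider, and the remaining keys override the per-provider defaults shown above. A minimal usage sketch, using only the keys and functions defined in this file (the API key value is a placeholder; omitting `api_key` falls back to the provider's environment variable):

```python
from dacp.intelligence import invoke_intelligence, validate_config

config = {
    "engine": "anthropic",               # one of get_supported_engines()
    "model": "claude-3-haiku-20240307",  # the module's Anthropic default
    "api_key": "sk-ant-XXXX",            # placeholder; ANTHROPIC_API_KEY also works
    "temperature": 0.7,
    "max_tokens": 150,
}

validate_config(config)  # raises ConfigurationError on bad input
print(invoke_intelligence("Summarize DACP in one sentence.", config))
```

Note that debug logging passes the config through `_sanitize_config_for_logging`, so the placeholder key above would be logged as `sk-a...XXXX` rather than in full.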
dacp/llm.py
CHANGED

```diff
@@ -1,19 +1,32 @@
+"""
+Legacy LLM module - Provides backward compatibility for call_llm function.
+Uses the new intelligence module under the hood.
+"""
+
 import os
-import …
+from .intelligence import invoke_intelligence
 
 
 def call_llm(prompt: str, model: str = "gpt-4") -> str:
-… (the 14-line 0.1.0 implementation body, and the rest of the truncated import above, are not preserved in this diff view)
+    """
+    Legacy function for calling LLMs.
+    Maintained for backward compatibility.
+
+    Args:
+        prompt: The input prompt
+        model: The model to use (defaults to gpt-4)
+
+    Returns:
+        Response from the LLM
+    """
+    # Create OpenAI config for backward compatibility
+    config = {
+        "engine": "openai",
+        "model": model,
+        "api_key": os.getenv("OPENAI_API_KEY"),
+        "endpoint": "https://api.openai.com/v1",
+        "temperature": 0.7,
+        "max_tokens": 150
+    }
+
+    return invoke_intelligence(prompt, config)
```
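In 0.3.x the legacy entry point is therefore sugar for an OpenAI-engine `invoke_intelligence` call; the two calls in this sketch are equivalent (both require `OPENAI_API_KEY`):

```python
import os
from dacp.llm import call_llm
from dacp.intelligence import invoke_intelligence

a = call_llm("Hello", model="gpt-4")  # legacy path
b = invoke_intelligence("Hello", {    # the config call_llm builds internally
    "engine": "openai",
    "model": "gpt-4",
    "api_key": os.getenv("OPENAI_API_KEY"),
    "endpoint": "https://api.openai.com/v1",
    "temperature": 0.7,
    "max_tokens": 150,
})
```

Callers that need a different temperature, endpoint, or token budget must use `invoke_intelligence` directly, since `call_llm` hard-codes those values.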
dacp/logging_config.py
ADDED

```python
"""
DACP Logging Configuration

Utilities for configuring logging for DACP components.
"""

import logging
import sys
from typing import Optional


def setup_dacp_logging(
    level: str = "INFO",
    format_style: str = "detailed",
    include_timestamp: bool = True,
    log_file: Optional[str] = None
) -> None:
    """
    Set up logging for DACP components.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        format_style: Log format style ('simple', 'detailed', 'emoji')
        include_timestamp: Whether to include timestamps in logs
        log_file: Optional file path to also log to a file
    """
    # Define format styles
    if format_style == "simple":
        if include_timestamp:
            log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
        else:
            log_format = "%(name)s - %(levelname)s - %(message)s"
    elif format_style == "detailed":
        if include_timestamp:
            log_format = "%(asctime)s - %(name)s:%(lineno)d - %(levelname)s - %(message)s"
        else:
            log_format = "%(name)s:%(lineno)d - %(levelname)s - %(message)s"
    elif format_style == "emoji":
        # Emoji format doesn't include logger name since emojis provide context
        if include_timestamp:
            log_format = "%(asctime)s - %(message)s"
        else:
            log_format = "%(message)s"
    else:
        raise ValueError(f"Unknown format_style: {format_style}")

    # Configure root logger for DACP components
    logger = logging.getLogger("dacp")
    logger.setLevel(getattr(logging, level.upper()))

    # Remove existing handlers to avoid duplicates
    for handler in logger.handlers[:]:
        logger.removeHandler(handler)

    # Create formatter
    formatter = logging.Formatter(
        log_format,
        datefmt="%Y-%m-%d %H:%M:%S" if include_timestamp else None
    )

    # Console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Optional file handler
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    # Prevent propagation to root logger to avoid duplicate messages
    logger.propagate = False

    logger.info(f"🚀 DACP logging configured: level={level}, style={format_style}")


def set_dacp_log_level(level: str) -> None:
    """
    Set the log level for all DACP components.

    Args:
        level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
    """
    logger = logging.getLogger("dacp")
    logger.setLevel(getattr(logging, level.upper()))
    logger.info(f"📊 DACP log level changed to {level}")


def disable_dacp_logging() -> None:
    """Disable all DACP logging."""
    logger = logging.getLogger("dacp")
    logger.disabled = True


def enable_dacp_logging() -> None:
    """Re-enable DACP logging."""
    logger = logging.getLogger("dacp")
    logger.disabled = False


def get_dacp_logger(name: str) -> logging.Logger:
    """
    Get a logger for a DACP component.

    Args:
        name: Logger name (usually __name__)

    Returns:
        Configured logger
    """
    return logging.getLogger(f"dacp.{name}")


# Convenience functions for quick setup
def enable_debug_logging(log_file: Optional[str] = None) -> None:
    """Enable debug logging with detailed format."""
    setup_dacp_logging(level="DEBUG", format_style="detailed", log_file=log_file)


def enable_info_logging(log_file: Optional[str] = None) -> None:
    """Enable info logging with emoji format."""
    setup_dacp_logging(level="INFO", format_style="emoji", log_file=log_file)


def enable_quiet_logging() -> None:
    """Enable only error and critical logging."""
    setup_dacp_logging(level="ERROR", format_style="simple", include_timestamp=False)
```
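These helpers configure only the `dacp` logger and set `propagate = False`, so they won't duplicate or interfere with an application's root-logger setup. A short usage sketch (the log-file path is illustrative):

```python
import dacp

# Detailed DEBUG logs with timestamps, mirrored to a file.
dacp.setup_dacp_logging(
    level="DEBUG",
    format_style="detailed",  # or "simple" / "emoji"
    include_timestamp=True,
    log_file="dacp.log",      # illustrative path
)

dacp.set_dacp_log_level("WARNING")  # raise the threshold at runtime
dacp.disable_dacp_logging()         # silence the "dacp" logger entirely
dacp.enable_dacp_logging()          # and bring it back
```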
dacp/main.py
ADDED

```python
from dacp.orchestrator import Orchestrator

def main():
    orchestrator = Orchestrator()

    # Agent registers itself with the orchestrator
    hello_agent = HelloWorldAgent("hello_agent", orchestrator)

    # Orchestrator sends a message to the agent and prints the response
    input_message = {"name": "Alice"}
    response = orchestrator.call_agent("hello_agent", input_message)
    print("Orchestrator received:", response)

if __name__ == "__main__":
    main()
```
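Note that as published, `dacp/main.py` never imports or defines `HelloWorldAgent`, so running the module raises `NameError`. The body of `dacp/orchestrator.py` is not included in this view, so the real `Agent` interface is not visible here; purely as a hypothetical sketch consistent with how `main.py` uses it (the `register_agent` and `handle_message` hooks are assumptions, not the published API):

```python
from dacp.orchestrator import Agent, Orchestrator

class HelloWorldAgent(Agent):
    """Hypothetical agent; the published Agent hooks are not visible in this diff."""

    def __init__(self, name: str, orchestrator: Orchestrator):
        self.name = name
        orchestrator.register_agent(name, self)  # assumed registration hook

    def handle_message(self, message: dict) -> dict:  # assumed entry point
        return {"greeting": f"Hello, {message['name']}!"}
```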