dacp 0.3.1__py3-none-any.whl → 0.3.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dacp/__init__.py +3 -5
- dacp/intelligence.py +230 -305
- dacp/llm.py +13 -6
- dacp/logging_config.py +17 -16
- dacp/main.py +7 -13
- dacp/orchestrator.py +218 -182
- dacp/tools.py +64 -45
- {dacp-0.3.1.dist-info → dacp-0.3.2.dist-info}/METADATA +342 -1
- dacp-0.3.2.dist-info/RECORD +15 -0
- dacp-0.3.1.dist-info/RECORD +0 -15
- {dacp-0.3.1.dist-info → dacp-0.3.2.dist-info}/WHEEL +0 -0
- {dacp-0.3.1.dist-info → dacp-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {dacp-0.3.1.dist-info → dacp-0.3.2.dist-info}/top_level.txt +0 -0
dacp/__init__.py
CHANGED
```diff
@@ -11,9 +11,7 @@ from .protocol import (
     is_final_response,
     get_final_response,
 )
-from .tools import (
-    register_tool, run_tool, TOOL_REGISTRY, file_writer
-)
+from .tools import register_tool, execute_tool, TOOL_REGISTRY, file_writer
 from .llm import call_llm
 from .intelligence import invoke_intelligence
 from .orchestrator import Orchestrator, Agent
@@ -32,14 +30,14 @@ __version__ = "0.3.0"
 __all__ = [
     # Protocol functions
     "parse_agent_response",
-    "is_tool_request",
+    "is_tool_request",
    "get_tool_request",
     "wrap_tool_result",
     "is_final_response",
     "get_final_response",
     # Tool functions
     "register_tool",
-    "run_tool",
+    "execute_tool",
     "TOOL_REGISTRY",
     "file_writer",
     # LLM functions
```
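The functional change here is the `run_tool` → `execute_tool` rename, in both the import from `.tools` and `__all__`. A minimal migration sketch follows; only the new name and the exported symbols are confirmed by this diff, so the `(tool_name, args_dict)` call shape and the `file_writer` arguments are assumptions (the `tools.py` hunk is not shown in this section):

```python
# Hypothetical 0.3.2 call site after the run_tool -> execute_tool rename.
# The call shape and file_writer arguments are assumed, not confirmed here.
from dacp import TOOL_REGISTRY, execute_tool

print("Registered tools:", list(TOOL_REGISTRY))  # assumes a dict-like registry
result = execute_tool("file_writer", {"path": "/tmp/hello.txt", "content": "hi"})
print(result)
```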
dacp/intelligence.py
CHANGED
```diff
@@ -1,378 +1,303 @@
 """
-
+Intelligence provider integration for DACP.
+
+This module provides a unified interface for calling different LLM providers
+(OpenAI, Anthropic, Azure, Local) with comprehensive error handling and logging.
 """
 
 import os
 import logging
-from typing import Dict, Any
+import time
+from typing import Dict, Any, Union
 
-# Set up logger for this module
 logger = logging.getLogger("dacp.intelligence")
 
 
-class IntelligenceError(Exception):
-    """Base exception for intelligence errors."""
-    pass
-
-
-class UnsupportedProviderError(IntelligenceError):
-    """Raised when an unsupported intelligence provider is requested."""
-    pass
-
-
-class ConfigurationError(IntelligenceError):
-    """Raised when intelligence configuration is invalid."""
-    pass
-
-
-def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> str:
+def invoke_intelligence(
+    prompt: str, config: Dict[str, Any]
+) -> Union[str, Dict[str, Any]]:
     """
-    Invoke an intelligence provider
-
+    Invoke an intelligence provider with the given prompt and configuration.
+
     Args:
-        prompt: The input prompt to send to the intelligence provider
-        config: Configuration dictionary containing provider
-
+        prompt: The input prompt/message to send to the intelligence provider
+        config: Configuration dictionary containing provider settings
+
     Returns:
-        Response
-
+        Response from the intelligence provider (string or dict)
+
     Raises:
-
-
-        IntelligenceError: For other provider-specific errors
+        ValueError: If configuration is invalid
+        Exception: If the intelligence call fails
     """
-    engine = config.get("engine")
-    if not engine:
-        logger.error("❌ Missing 'engine' in intelligence configuration")
-        raise ConfigurationError("Missing 'engine' in intelligence configuration")
-
-    engine = engine.lower()
-    model = config.get("model", "default")
-
-    logger.info(f"🧠 Invoking intelligence: engine='{engine}', model='{model}'")
-    logger.debug(f"📋 Prompt length: {len(prompt)} characters")
-    logger.debug(f"⚙️ Full config: {_sanitize_config_for_logging(config)}")
-
-    import time
     start_time = time.time()
-
+
+    engine = config.get("engine", "").lower()
+    model = config.get("model", "unknown")
+
+    logger.info(f"🧠 Invoking intelligence: engine='{engine}', model='{model}'")
+    logger.debug(f"📋 Prompt: {prompt[:100]}...")
+
     try:
-        if engine == "openai":
+        # Validate configuration
+        _validate_config(config)
+
+        # Route to appropriate provider
+        if engine in ["openai", "gpt"]:
             result = _invoke_openai(prompt, config)
-        elif engine == "anthropic":
+        elif engine in ["anthropic", "claude"]:
             result = _invoke_anthropic(prompt, config)
-        elif engine == "azure":
+        elif engine in ["azure", "azure_openai"]:
             result = _invoke_azure_openai(prompt, config)
-        elif engine == "local":
+        elif engine in ["local", "ollama"]:
             result = _invoke_local(prompt, config)
         else:
-
-            raise
-
-
-
-
-
+            available_engines = ["openai", "anthropic", "azure", "local"]
+            raise ValueError(
+                f"Unsupported engine: {engine}. "
+                f"Available engines: {available_engines}"
+            )
+
+        duration = time.time() - start_time
+        logger.info(f"✅ Intelligence call completed in {duration:.3f}s")
+        logger.debug(f"📤 Response: {str(result)[:200]}...")
+
         return result
-
-    except (IntelligenceError, UnsupportedProviderError, ConfigurationError):
-        # Re-raise our own exceptions without modification
-        execution_time = time.time() - start_time
-        logger.error(f"❌ Intelligence call failed after {execution_time:.3f}s")
-        raise
+
     except Exception as e:
-
-        logger.error(
-
+        duration = time.time() - start_time
+        logger.error(
+            f"❌ Intelligence call failed after {duration:.3f}s: "
+            f"{type(e).__name__}: {e}"
+        )
+        raise
+
+
+def _validate_config(config: Dict[str, Any]) -> None:
+    """Validate intelligence configuration."""
+    if not isinstance(config, dict):
+        raise ValueError("Configuration must be a dictionary")
+
+    engine = config.get("engine")
+    if not engine:
+        raise ValueError("Engine must be specified in configuration")
```
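The rewritten `invoke_intelligence` validates the config up front via the new `_validate_config`, accepts engine aliases (`gpt`, `claude`, `azure_openai`, `ollama`), raises plain `ValueError` instead of the removed custom exception hierarchy, and logs the call duration. A usage sketch built only from keys that appear in this hunk; the prompt text is illustrative:

```python
import os

from dacp import invoke_intelligence

# Keys taken from the hunk above; values are illustrative.
config = {
    "engine": "openai",        # or the alias "gpt"
    "model": "gpt-3.5-turbo",  # the new default in _invoke_openai
    "api_key": os.getenv("OPENAI_API_KEY"),
    "temperature": 0.7,
    "max_tokens": 1000,
}

reply = invoke_intelligence("Summarize DACP in one sentence.", config)
print(reply)
```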
|
```diff
 def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke OpenAI
-    logger.debug("🔵 Initializing OpenAI provider")
-
+    """Invoke OpenAI GPT models."""
     try:
         import openai
-        logger.debug("✅ OpenAI package imported successfully")
     except ImportError:
-
-
-
-    model = config.get("model", "gpt-4")
+        raise ImportError("OpenAI package not installed. Run: pip install openai")
+
+    # Get API key
     api_key = config.get("api_key") or os.getenv("OPENAI_API_KEY")
-    base_url = config.get("endpoint", "https://api.openai.com/v1")
-    temperature = config.get("temperature", 0.7)
-    max_tokens = config.get("max_tokens", 150)
-
-    logger.debug(f"🔧 OpenAI config: model={model}, base_url={base_url}, temp={temperature}, max_tokens={max_tokens}")
-
     if not api_key:
         logger.error("❌ OpenAI API key not found")
-        raise
-
-
-    logger.debug("🔗 Creating OpenAI client")
-    client = openai.OpenAI(api_key=api_key, base_url=base_url)
-
-    logger.debug("📡 Sending request to OpenAI API")
-    response = client.chat.completions.create(
-        model=model,
-        messages=[{"role": "user", "content": prompt}],
-        temperature=temperature,
-        max_tokens=max_tokens
+        raise ValueError(
+            "OpenAI API key not found in config or OPENAI_API_KEY "
+            "environment variable"
         )
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Configure client
+    base_url = config.get("base_url")
+    if base_url:
+        client = openai.OpenAI(api_key=api_key, base_url=base_url)
+    else:
+        client = openai.OpenAI(api_key=api_key)
+
+    # Prepare request
+    model = config.get("model", "gpt-3.5-turbo")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 OpenAI config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.chat.completions.create(
+        model=model,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    content = response.choices[0].message.content
+    if content is None:
+        raise ValueError("OpenAI returned empty response")
+
+    return str(content)
```
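Two behavioral notes on `_invoke_openai`: the endpoint override moved from `endpoint` to `base_url` and is only passed to the client when set, and the defaults shifted to `gpt-3.5-turbo` with `max_tokens=1000` (up from 150). A config sketch; the proxy URL is a placeholder:

```python
# Exercises the new optional base_url branch; the URL is hypothetical.
openai_config = {
    "engine": "openai",
    "model": "gpt-4",                            # overrides the gpt-3.5-turbo default
    "base_url": "https://llm-proxy.example/v1",  # omit to use the stock endpoint
    # api_key falls back to the OPENAI_API_KEY environment variable
}
```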
|
```diff
 def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke Anthropic
-    logger.debug("🟣 Initializing Anthropic provider")
-
+    """Invoke Anthropic Claude models."""
     try:
         import anthropic
-        logger.debug("✅ Anthropic package imported successfully")
     except ImportError:
-
-
-
-    model = config.get("model", "claude-3-haiku-20240307")
+        raise ImportError("Anthropic package not installed. Run: pip install anthropic")
+
+    # Get API key
     api_key = config.get("api_key") or os.getenv("ANTHROPIC_API_KEY")
-    base_url = config.get("endpoint", "https://api.anthropic.com")
-    max_tokens = config.get("max_tokens", 150)
-    temperature = config.get("temperature", 0.7)
-
-    logger.debug(f"🔧 Anthropic config: model={model}, base_url={base_url}, temp={temperature}, max_tokens={max_tokens}")
-
     if not api_key:
         logger.error("❌ Anthropic API key not found")
-        raise
-
-
-    logger.debug("🔗 Creating Anthropic client")
-    client = anthropic.Anthropic(api_key=api_key, base_url=base_url)
-
-    logger.debug("📡 Sending request to Anthropic API")
-    response = client.messages.create(
-        model=model,
-        max_tokens=max_tokens,
-        temperature=temperature,
-        messages=[{"role": "user", "content": prompt}]
+        raise ValueError(
+            "Anthropic API key not found in config or ANTHROPIC_API_KEY "
+            "environment variable"
         )
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Configure client
+    base_url = config.get("base_url")
+    if base_url:
+        client = anthropic.Anthropic(api_key=api_key, base_url=base_url)
+    else:
+        client = anthropic.Anthropic(api_key=api_key)
+
+    # Prepare request
+    model = config.get("model", "claude-3-haiku-20240307")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 Anthropic config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.messages.create(
+        model=model,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        messages=[{"role": "user", "content": prompt}],
+    )
+
+    # Get text from first content block
+    content_block = response.content[0]
+    if hasattr(content_block, "text"):
+        return str(content_block.text)
+    else:
+        raise ValueError("Anthropic returned unexpected response format")
```
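`_invoke_anthropic` mirrors the OpenAI changes (optional `base_url`, `max_tokens` default raised to 1000) and now checks that the first content block carries a `text` attribute before returning. A matching config sketch with values from the hunk:

```python
anthropic_config = {
    "engine": "claude",                  # routed the same as "anthropic"
    "model": "claude-3-haiku-20240307",  # retained default
    "max_tokens": 1000,                  # new default, up from 150
    # api_key falls back to the ANTHROPIC_API_KEY environment variable
}
```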
|
```diff
 def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke Azure OpenAI
-    logger.debug("🔷 Initializing Azure OpenAI provider")
-
+    """Invoke Azure OpenAI models."""
     try:
         import openai
-        logger.debug("✅ OpenAI package imported successfully")
     except ImportError:
-
-
-
-    model = config.get("model", "gpt-4")
+        raise ImportError("OpenAI package not installed. Run: pip install openai")
+
+    # Get required Azure configuration
     api_key = config.get("api_key") or os.getenv("AZURE_OPENAI_API_KEY")
     endpoint = config.get("endpoint") or os.getenv("AZURE_OPENAI_ENDPOINT")
-    api_version = config.get("api_version", "
-
-    max_tokens = config.get("max_tokens", 150)
-
-    logger.debug(f"🔧 Azure config: model={model}, endpoint={endpoint}, api_version={api_version}, temp={temperature}, max_tokens={max_tokens}")
-
+    api_version = config.get("api_version", "2023-12-01-preview")
+
     if not api_key:
         logger.error("❌ Azure OpenAI API key not found")
-        raise
-
+        raise ValueError(
+            "Azure OpenAI API key not found in config or "
+            "AZURE_OPENAI_API_KEY environment variable"
+        )
+
     if not endpoint:
         logger.error("❌ Azure OpenAI endpoint not found")
-        raise
-
-
-    logger.debug("🔗 Creating Azure OpenAI client")
-    client = openai.AzureOpenAI(
-        api_key=api_key,
-        azure_endpoint=endpoint,
-        api_version=api_version
-    )
-
-    logger.debug("📡 Sending request to Azure OpenAI API")
-    response = client.chat.completions.create(
-        model=model,
-        messages=[{"role": "user", "content": prompt}],
-        temperature=temperature,
-        max_tokens=max_tokens
+        raise ValueError(
+            "Azure OpenAI endpoint not found in config or "
+            "AZURE_OPENAI_ENDPOINT environment variable"
         )
-
-
-
-
-
-
-
-
-
-
-
-
+
+    # Configure Azure client
+    client = openai.AzureOpenAI(
+        api_key=api_key, azure_endpoint=endpoint, api_version=api_version
+    )
+
+    # Prepare request
+    model = config.get("model", config.get("deployment_name", "gpt-35-turbo"))
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 Azure OpenAI config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.chat.completions.create(
+        model=model,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    content = response.choices[0].message.content
+    if content is None:
+        raise ValueError("Azure OpenAI returned empty response")
+
+    return str(content)
```
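`_invoke_azure_openai` now resolves the model as `model`, then `deployment_name`, then `gpt-35-turbo`, and pins a default `api_version`. A config sketch; the resource URL is a placeholder:

```python
azure_config = {
    "engine": "azure",                                   # "azure_openai" also routes here
    "deployment_name": "gpt-35-turbo",                   # used when "model" is absent
    "endpoint": "https://my-resource.openai.azure.com",  # placeholder; or AZURE_OPENAI_ENDPOINT
    "api_version": "2023-12-01-preview",                 # new default
    # api_key falls back to the AZURE_OPENAI_API_KEY environment variable
}
```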
|
```diff
 def _invoke_local(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke local LLM
+    """Invoke local LLM (e.g., Ollama)."""
     import requests
-
-
+
+    base_url = config.get("base_url", "http://localhost:11434")
     model = config.get("model", "llama2")
-
-
-
-
-    logger.debug(f"🔧 Local config:
-
+    endpoint = config.get("endpoint", "/api/generate")
+
+    url = f"{base_url.rstrip('/')}{endpoint}"
+
+    logger.debug(f"🔧 Local config: url={url}, model={model}")
+
+    # Prepare request payload
+    payload = {
+        "model": model,
+        "prompt": prompt,
+        "stream": False,
+        "options": {
+            "temperature": config.get("temperature", 0.7),
+            "num_predict": config.get("max_tokens", 1000),
+        },
+    }
+
     try:
-
-        if "ollama" in endpoint or ":11434" in endpoint:
-            logger.debug("📦 Using Ollama API format")
-            payload = {
-                "model": model,
-                "prompt": prompt,
-                "stream": False,
-                "options": {
-                    "temperature": temperature,
-                    "num_predict": max_tokens
-                }
-            }
-        else:
-            logger.debug("📦 Using generic local API format")
-            payload = {
-                "model": model,
-                "prompt": prompt,
-                "temperature": temperature,
-                "max_tokens": max_tokens
-            }
-
-        logger.debug(f"📡 Sending request to local endpoint: {endpoint}")
-        response = requests.post(endpoint, json=payload, timeout=30)
+        response = requests.post(url, json=payload, timeout=config.get("timeout", 30))
         response.raise_for_status()
-
+
         result = response.json()
-
-        # Handle different response formats
+
+        # Handle different response formats and ensure string return
         if "response" in result:
-            response_text = result["response"]
-
+            response_text = result["response"]
+            return str(response_text) if response_text is not None else ""
+        elif "content" in result:
+            content_text = result["content"]
+            return str(content_text) if content_text is not None else ""
         elif "text" in result:
-
-
-        elif "choices" in result and len(result["choices"]) > 0:
-            response_text = result["choices"][0].get("text", "")  # OpenAI-compatible format
-            logger.debug("✅ Local API call successful (OpenAI-compatible format)")
+            text_content = result["text"]
+            return str(text_content) if text_content is not None else ""
         else:
-            logger.
-
-
-            return response_text
-
+            logger.warning("Unexpected response format from local LLM")
+            return str(result)
+
     except requests.RequestException as e:
-        logger.error(f"❌ Local
-        raise
+        logger.error(f"❌ Local LLM request failed: {e}")
+        raise Exception(f"Local LLM request failed: {e}")
     except Exception as e:
-        logger.error(f"❌ Local
-        raise
-
+        logger.error(f"❌ Local LLM call failed: {e}")
+        raise
```
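The local path drops the old Ollama-vs-generic payload branching: a single Ollama-shaped payload is always POSTed to `base_url` + `endpoint`, and the timeout becomes configurable. A config sketch using the defaults from the hunk:

```python
local_config = {
    "engine": "ollama",                    # routed the same as "local"
    "base_url": "http://localhost:11434",  # default
    "endpoint": "/api/generate",           # default path, joined onto base_url
    "model": "llama2",
    "timeout": 30,                         # seconds, passed to requests.post
}
```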
|
```diff
-def get_supported_engines() -> list:
-    """Get list of supported intelligence engines."""
-    return ["openai", "anthropic", "azure", "local"]
 
+def _mask_sensitive_config(config: Dict[str, Any]) -> Dict[str, Any]:
+    """Mask sensitive information in config for logging."""
+    masked = config.copy()
+    sensitive_keys = ["api_key", "password", "token", "secret"]
 
-
-
-
-
-
-        config: Configuration dictionary
-
-    Returns:
-        True if valid
-
-    Raises:
-        ConfigurationError: If configuration is invalid
-    """
-    logger.debug(f"🔍 Validating intelligence configuration")
-
-    if not isinstance(config, dict):
-        logger.error("❌ Configuration must be a dictionary")
-        raise ConfigurationError("Configuration must be a dictionary")
-
-    engine = config.get("engine")
-    if not engine:
-        logger.error("❌ Missing 'engine' in configuration")
-        raise ConfigurationError("Missing 'engine' in configuration")
-
-    if engine.lower() not in get_supported_engines():
-        logger.error(f"❌ Unsupported engine: {engine}")
-        raise ConfigurationError(f"Unsupported engine: {engine}. Supported engines: {get_supported_engines()}")
-
-    # Engine-specific validation
-    engine = engine.lower()
-    logger.debug(f"🔧 Validating {engine} specific configuration")
-
-    if engine in ["openai", "azure"]:
-        if not config.get("api_key") and not os.getenv("OPENAI_API_KEY") and not os.getenv("AZURE_OPENAI_API_KEY"):
-            logger.error(f"❌ API key required for {engine} engine")
-            raise ConfigurationError(f"API key required for {engine} engine")
-
-    elif engine == "anthropic":
-        if not config.get("api_key") and not os.getenv("ANTHROPIC_API_KEY"):
-            logger.error("❌ API key required for Anthropic engine")
-            raise ConfigurationError("API key required for Anthropic engine")
-
-    elif engine == "local":
-        if not config.get("endpoint"):
-            config["endpoint"] = "http://localhost:11434/api/generate"  # Default to Ollama
-            logger.debug("🔧 Set default endpoint for local engine")
-
-    logger.debug(f"✅ Configuration validation successful for {engine}")
-    return True
-
-
-def _sanitize_config_for_logging(config: Dict[str, Any]) -> Dict[str, Any]:
-    """Sanitize config for logging by masking sensitive data."""
-    sanitized = config.copy()
-
-    # Mask sensitive fields
-    sensitive_fields = ['api_key', 'password', 'token', 'secret']
-    for field in sensitive_fields:
-        if field in sanitized and sanitized[field]:
-            # Show first 4 and last 4 characters, mask the rest
-            value = str(sanitized[field])
-            if len(value) > 8:
-                sanitized[field] = f"{value[:4]}...{value[-4:]}"
+    for key in masked:
+        if any(sensitive in key.lower() for sensitive in sensitive_keys):
+            value = masked[key]
+            if isinstance(value, str) and len(value) > 8:
+                masked[key] = f"{value[:4]}...{value[-4:]}"
             else:
-
-
-    return sanitized
+                masked[key] = "***"
+
+    return masked
```
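`_mask_sensitive_config` replaces `_sanitize_config_for_logging`; it now substring-matches key names (so `auth_token` is caught, not just `token`) and masks short or non-string values entirely. A behavior sketch reproduced from the hunk above; it imports a private helper, for illustration only:

```python
from dacp.intelligence import _mask_sensitive_config  # private helper

cfg = {"engine": "openai", "api_key": "sk-abcdef123456789", "auth_token": "xy"}
print(_mask_sensitive_config(cfg))
# {'engine': 'openai', 'api_key': 'sk-a...6789', 'auth_token': '***'}
```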
|