dacp 0.3.0-py3-none-any.whl → 0.3.2-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- dacp/__init__.py +23 -4
- dacp/intelligence.py +241 -210
- dacp/llm.py +13 -6
- dacp/logging_config.py +129 -0
- dacp/main.py +7 -13
- dacp/orchestrator.py +225 -201
- dacp/tools.py +72 -22
- dacp-0.3.2.dist-info/METADATA +805 -0
- dacp-0.3.2.dist-info/RECORD +15 -0
- dacp-0.3.0.dist-info/METADATA +0 -369
- dacp-0.3.0.dist-info/RECORD +0 -14
- {dacp-0.3.0.dist-info → dacp-0.3.2.dist-info}/WHEEL +0 -0
- {dacp-0.3.0.dist-info → dacp-0.3.2.dist-info}/licenses/LICENSE +0 -0
- {dacp-0.3.0.dist-info → dacp-0.3.2.dist-info}/top_level.txt +0 -0
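The most visible API change in this release is in `dacp/__init__.py` (first diff below): the exported tool runner `run_tool` is renamed to `execute_tool`. A minimal migration sketch follows; the argument shape is a hypothetical illustration, since this diff shows only the renamed export, not the function's signature:

```python
import dacp

# dacp 0.3.0 (removed export):
# result = dacp.run_tool("file_writer", {"path": "out.txt", "content": "hi"})

# dacp 0.3.2 (new export). The ("tool name", args dict) shape is assumed
# for illustration; see dacp/tools.py for the actual signature.
result = dacp.execute_tool("file_writer", {"path": "out.txt", "content": "hi"})
```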
dacp/__init__.py
CHANGED
@@ -11,28 +11,47 @@ from .protocol import (
     is_final_response,
     get_final_response,
 )
-from .tools import (
-    register_tool, run_tool, TOOL_REGISTRY, file_writer
-)
+from .tools import register_tool, execute_tool, TOOL_REGISTRY, file_writer
 from .llm import call_llm
 from .intelligence import invoke_intelligence
 from .orchestrator import Orchestrator, Agent
+from .logging_config import (
+    setup_dacp_logging,
+    enable_debug_logging,
+    enable_info_logging,
+    enable_quiet_logging,
+    set_dacp_log_level,
+    disable_dacp_logging,
+    enable_dacp_logging,
+)

 __version__ = "0.3.0"

 __all__ = [
+    # Protocol functions
     "parse_agent_response",
     "is_tool_request",
     "get_tool_request",
     "wrap_tool_result",
     "is_final_response",
     "get_final_response",
+    # Tool functions
     "register_tool",
-    "run_tool",
+    "execute_tool",
     "TOOL_REGISTRY",
     "file_writer",
+    # LLM functions
     "call_llm",
     "invoke_intelligence",
+    # Agent orchestration
     "Orchestrator",
     "Agent",
+    # Logging configuration
+    "setup_dacp_logging",
+    "enable_debug_logging",
+    "enable_info_logging",
+    "enable_quiet_logging",
+    "set_dacp_log_level",
+    "disable_dacp_logging",
+    "enable_dacp_logging",
 ]
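Version 0.3.2 also starts exporting the new `logging_config` helpers at package level. A sketch of how they might be called; only the names are confirmed by the diff above, so the zero-argument form and the level string are assumptions:

```python
import dacp

# Assumed usage of the new 0.3.2 logging exports; signatures are not
# shown in this diff, only the names.
dacp.enable_info_logging()        # hypothetical no-argument convenience call
dacp.set_dacp_log_level("DEBUG")  # hypothetical: accepts a standard level name
dacp.disable_dacp_logging()       # hypothetical: silences dacp loggers
```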
dacp/intelligence.py
CHANGED
@@ -1,272 +1,303 @@
 """
-
+Intelligence provider integration for DACP.
+
+This module provides a unified interface for calling different LLM providers
+(OpenAI, Anthropic, Azure, Local) with comprehensive error handling and logging.
 """

 import os
 import logging
-
+import time
+from typing import Dict, Any, Union

-
+logger = logging.getLogger("dacp.intelligence")


-
-
-
+def invoke_intelligence(
+    prompt: str, config: Dict[str, Any]
+) -> Union[str, Dict[str, Any]]:
+    """
+    Invoke an intelligence provider with the given prompt and configuration.

+    Args:
+        prompt: The input prompt/message to send to the intelligence provider
+        config: Configuration dictionary containing provider settings

-
-
-    pass
+    Returns:
+        Response from the intelligence provider (string or dict)

+    Raises:
+        ValueError: If configuration is invalid
+        Exception: If the intelligence call fails
+    """
+    start_time = time.time()

-
-
-    pass
+    engine = config.get("engine", "").lower()
+    model = config.get("model", "unknown")

+    logger.info(f"🧠 Invoking intelligence: engine='{engine}', model='{model}'")
+    logger.debug(f"📋 Prompt: {prompt[:100]}...")
+
+    try:
+        # Validate configuration
+        _validate_config(config)
+
+        # Route to appropriate provider
+        if engine in ["openai", "gpt"]:
+            result = _invoke_openai(prompt, config)
+        elif engine in ["anthropic", "claude"]:
+            result = _invoke_anthropic(prompt, config)
+        elif engine in ["azure", "azure_openai"]:
+            result = _invoke_azure_openai(prompt, config)
+        elif engine in ["local", "ollama"]:
+            result = _invoke_local(prompt, config)
+        else:
+            available_engines = ["openai", "anthropic", "azure", "local"]
+            raise ValueError(
+                f"Unsupported engine: {engine}. "
+                f"Available engines: {available_engines}"
+            )
+
+        duration = time.time() - start_time
+        logger.info(f"✅ Intelligence call completed in {duration:.3f}s")
+        logger.debug(f"📤 Response: {str(result)[:200]}...")
+
+        return result
+
+    except Exception as e:
+        duration = time.time() - start_time
+        logger.error(
+            f"❌ Intelligence call failed after {duration:.3f}s: "
+            f"{type(e).__name__}: {e}"
+        )
+        raise
+
+
+def _validate_config(config: Dict[str, Any]) -> None:
+    """Validate intelligence configuration."""
+    if not isinstance(config, dict):
+        raise ValueError("Configuration must be a dictionary")

-def invoke_intelligence(prompt: str, config: Dict[str, Any]) -> str:
-    """
-    Invoke an intelligence provider (LLM) with the given prompt and configuration.
-
-    Args:
-        prompt: The input prompt to send to the intelligence provider
-        config: Configuration dictionary containing provider details
-
-    Returns:
-        Response string from the intelligence provider
-
-    Raises:
-        UnsupportedProviderError: If the provider is not supported
-        ConfigurationError: If the configuration is invalid
-        IntelligenceError: For other provider-specific errors
-    """
     engine = config.get("engine")
     if not engine:
-        raise
-
-    engine = engine.lower()
-
-    if engine == "openai":
-        return _invoke_openai(prompt, config)
-    elif engine == "anthropic":
-        return _invoke_anthropic(prompt, config)
-    elif engine == "azure":
-        return _invoke_azure_openai(prompt, config)
-    elif engine == "local":
-        return _invoke_local(prompt, config)
-    else:
-        raise UnsupportedProviderError(f"Unsupported intelligence engine: {engine}")
+        raise ValueError("Engine must be specified in configuration")


 def _invoke_openai(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke OpenAI
+    """Invoke OpenAI GPT models."""
     try:
         import openai
     except ImportError:
-        raise
-
-
+        raise ImportError("OpenAI package not installed. Run: pip install openai")
+
+    # Get API key
     api_key = config.get("api_key") or os.getenv("OPENAI_API_KEY")
-    base_url = config.get("endpoint", "https://api.openai.com/v1")
-    temperature = config.get("temperature", 0.7)
-    max_tokens = config.get("max_tokens", 150)
-
     if not api_key:
-
-
-
-
-        response = client.chat.completions.create(
-            model=model,
-            messages=[{"role": "user", "content": prompt}],
-            temperature=temperature,
-            max_tokens=max_tokens
+        logger.error("❌ OpenAI API key not found")
+        raise ValueError(
+            "OpenAI API key not found in config or OPENAI_API_KEY "
+            "environment variable"
         )
-
-
-
-
-
-
-
-
-
+
+    # Configure client
+    base_url = config.get("base_url")
+    if base_url:
+        client = openai.OpenAI(api_key=api_key, base_url=base_url)
+    else:
+        client = openai.OpenAI(api_key=api_key)
+
+    # Prepare request
+    model = config.get("model", "gpt-3.5-turbo")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 OpenAI config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.chat.completions.create(
+        model=model,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    content = response.choices[0].message.content
+    if content is None:
+        raise ValueError("OpenAI returned empty response")
+
+    return str(content)


 def _invoke_anthropic(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke Anthropic
+    """Invoke Anthropic Claude models."""
     try:
         import anthropic
     except ImportError:
-        raise
-
-
+        raise ImportError("Anthropic package not installed. Run: pip install anthropic")
+
+    # Get API key
     api_key = config.get("api_key") or os.getenv("ANTHROPIC_API_KEY")
-    base_url = config.get("endpoint", "https://api.anthropic.com")
-    max_tokens = config.get("max_tokens", 150)
-    temperature = config.get("temperature", 0.7)
-
     if not api_key:
-
-
-
-
-        response = client.messages.create(
-            model=model,
-            max_tokens=max_tokens,
-            temperature=temperature,
-            messages=[{"role": "user", "content": prompt}]
+        logger.error("❌ Anthropic API key not found")
+        raise ValueError(
+            "Anthropic API key not found in config or ANTHROPIC_API_KEY "
+            "environment variable"
         )
-
-
-
-
-
-
-
-
-
-
+
+    # Configure client
+    base_url = config.get("base_url")
+    if base_url:
+        client = anthropic.Anthropic(api_key=api_key, base_url=base_url)
+    else:
+        client = anthropic.Anthropic(api_key=api_key)
+
+    # Prepare request
+    model = config.get("model", "claude-3-haiku-20240307")
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 Anthropic config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.messages.create(
+        model=model,
+        max_tokens=max_tokens,
+        temperature=temperature,
+        messages=[{"role": "user", "content": prompt}],
+    )
+
+    # Get text from first content block
+    content_block = response.content[0]
+    if hasattr(content_block, "text"):
+        return str(content_block.text)
+    else:
+        raise ValueError("Anthropic returned unexpected response format")


 def _invoke_azure_openai(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke Azure OpenAI
+    """Invoke Azure OpenAI models."""
     try:
         import openai
     except ImportError:
-        raise
-
-
+        raise ImportError("OpenAI package not installed. Run: pip install openai")
+
+    # Get required Azure configuration
     api_key = config.get("api_key") or os.getenv("AZURE_OPENAI_API_KEY")
     endpoint = config.get("endpoint") or os.getenv("AZURE_OPENAI_ENDPOINT")
-    api_version = config.get("api_version", "
-
-    max_tokens = config.get("max_tokens", 150)
-
+    api_version = config.get("api_version", "2023-12-01-preview")
+
     if not api_key:
-
-
-
-
-
-        try:
-            client = openai.AzureOpenAI(
-                api_key=api_key,
-                azure_endpoint=endpoint,
-                api_version=api_version
+        logger.error("❌ Azure OpenAI API key not found")
+        raise ValueError(
+            "Azure OpenAI API key not found in config or "
+            "AZURE_OPENAI_API_KEY environment variable"
         )
-
-
-
-
-
-
+
+    if not endpoint:
+        logger.error("❌ Azure OpenAI endpoint not found")
+        raise ValueError(
+            "Azure OpenAI endpoint not found in config or "
+            "AZURE_OPENAI_ENDPOINT environment variable"
         )
-
-
-
-
-
-
-
-
-
+
+    # Configure Azure client
+    client = openai.AzureOpenAI(
+        api_key=api_key, azure_endpoint=endpoint, api_version=api_version
+    )
+
+    # Prepare request
+    model = config.get("model", config.get("deployment_name", "gpt-35-turbo"))
+    temperature = config.get("temperature", 0.7)
+    max_tokens = config.get("max_tokens", 1000)
+
+    logger.debug(
+        f"🔧 Azure OpenAI config: model={model}, temp={temperature}, "
+        f"max_tokens={max_tokens}"
+    )
+
+    # Make API call
+    response = client.chat.completions.create(
+        model=model,
+        messages=[{"role": "user", "content": prompt}],
+        temperature=temperature,
+        max_tokens=max_tokens,
+    )
+
+    content = response.choices[0].message.content
+    if content is None:
+        raise ValueError("Azure OpenAI returned empty response")
+
+    return str(content)


 def _invoke_local(prompt: str, config: Dict[str, Any]) -> str:
-    """Invoke local LLM
+    """Invoke local LLM (e.g., Ollama)."""
     import requests
-
-
+
+    base_url = config.get("base_url", "http://localhost:11434")
     model = config.get("model", "llama2")
-
-
-
+    endpoint = config.get("endpoint", "/api/generate")
+
+    url = f"{base_url.rstrip('/')}{endpoint}"
+
+    logger.debug(f"🔧 Local config: url={url}, model={model}")
+
+    # Prepare request payload
+    payload = {
+        "model": model,
+        "prompt": prompt,
+        "stream": False,
+        "options": {
+            "temperature": config.get("temperature", 0.7),
+            "num_predict": config.get("max_tokens", 1000),
+        },
+    }
+
     try:
-
-        if "ollama" in endpoint or ":11434" in endpoint:
-            payload = {
-                "model": model,
-                "prompt": prompt,
-                "stream": False,
-                "options": {
-                    "temperature": temperature,
-                    "num_predict": max_tokens
-                }
-            }
-        else:
-            # Generic local API format
-            payload = {
-                "model": model,
-                "prompt": prompt,
-                "temperature": temperature,
-                "max_tokens": max_tokens
-            }
-
-        response = requests.post(endpoint, json=payload, timeout=30)
+        response = requests.post(url, json=payload, timeout=config.get("timeout", 30))
         response.raise_for_status()
-
+
         result = response.json()
-
-        # Handle different response formats
+
+        # Handle different response formats and ensure string return
         if "response" in result:
-
+            response_text = result["response"]
+            return str(response_text) if response_text is not None else ""
+        elif "content" in result:
+            content_text = result["content"]
+            return str(content_text) if content_text is not None else ""
         elif "text" in result:
-
-
-            return result["choices"][0].get("text", "")  # OpenAI-compatible format
+            text_content = result["text"]
+            return str(text_content) if text_content is not None else ""
         else:
-
-
+            logger.warning("Unexpected response format from local LLM")
+            return str(result)
+
     except requests.RequestException as e:
-
-        raise
+        logger.error(f"❌ Local LLM request failed: {e}")
+        raise Exception(f"Local LLM request failed: {e}")
     except Exception as e:
-
-        raise
+        logger.error(f"❌ Local LLM call failed: {e}")
+        raise


-def
-    """
-
+def _mask_sensitive_config(config: Dict[str, Any]) -> Dict[str, Any]:
+    """Mask sensitive information in config for logging."""
+    masked = config.copy()
+    sensitive_keys = ["api_key", "password", "token", "secret"]

+    for key in masked:
+        if any(sensitive in key.lower() for sensitive in sensitive_keys):
+            value = masked[key]
+            if isinstance(value, str) and len(value) > 8:
+                masked[key] = f"{value[:4]}...{value[-4:]}"
+            else:
+                masked[key] = "***"

-
-    """
-    Validate intelligence configuration.
-
-    Args:
-        config: Configuration dictionary
-
-    Returns:
-        True if valid
-
-    Raises:
-        ConfigurationError: If configuration is invalid
-    """
-    if not isinstance(config, dict):
-        raise ConfigurationError("Configuration must be a dictionary")
-
-    engine = config.get("engine")
-    if not engine:
-        raise ConfigurationError("Missing 'engine' in configuration")
-
-    if engine.lower() not in get_supported_engines():
-        raise ConfigurationError(f"Unsupported engine: {engine}. Supported engines: {get_supported_engines()}")
-
-    # Engine-specific validation
-    engine = engine.lower()
-
-    if engine in ["openai", "azure"]:
-        if not config.get("api_key") and not os.getenv("OPENAI_API_KEY") and not os.getenv("AZURE_OPENAI_API_KEY"):
-            raise ConfigurationError(f"API key required for {engine} engine")
-
-    elif engine == "anthropic":
-        if not config.get("api_key") and not os.getenv("ANTHROPIC_API_KEY"):
-            raise ConfigurationError("API key required for Anthropic engine")
-
-    elif engine == "local":
-        if not config.get("endpoint"):
-            config["endpoint"] = "http://localhost:11434/api/generate"  # Default to Ollama
-
-    return True
+    return masked
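The rewritten `invoke_intelligence` validates the config, routes on `engine` (accepting the aliases `gpt`, `claude`, `azure_openai`, and `ollama`), and logs the call duration. A usage sketch assembled from the config keys the new code reads above; it needs a real `OPENAI_API_KEY` to run:

```python
import os
from dacp.intelligence import invoke_intelligence

# Every key below is read by the 0.3.2 code shown in this diff.
config = {
    "engine": "openai",        # or "anthropic", "azure", "local" (plus aliases)
    "model": "gpt-3.5-turbo",  # the default _invoke_openai falls back to
    "api_key": os.getenv("OPENAI_API_KEY"),
    "temperature": 0.7,
    "max_tokens": 1000,
}

reply = invoke_intelligence("Summarize DACP in one sentence.", config)
print(reply)
```

For a local Ollama instance, the equivalent config is `{"engine": "local", "model": "llama2"}`, with optional `base_url`, `endpoint`, and `timeout` keys handled by `_invoke_local`.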
dacp/llm.py
CHANGED
@@ -9,13 +9,13 @@ from .intelligence import invoke_intelligence

 def call_llm(prompt: str, model: str = "gpt-4") -> str:
     """
-    Legacy function for calling LLMs.
+    Legacy function for calling LLMs.
     Maintained for backward compatibility.
-
+
     Args:
         prompt: The input prompt
         model: The model to use (defaults to gpt-4)
-
+
     Returns:
         Response from the LLM
     """
@@ -26,7 +26,14 @@ def call_llm(prompt: str, model: str = "gpt-4") -> str:
         "api_key": os.getenv("OPENAI_API_KEY"),
         "endpoint": "https://api.openai.com/v1",
         "temperature": 0.7,
-        "max_tokens": 150
+        "max_tokens": 150,
     }
-
-
+
+    result = invoke_intelligence(prompt, config)
+
+    # Ensure we return a string for backward compatibility
+    if isinstance(result, str):
+        return result
+    else:
+        # If it's a dict (error response), convert to string
+        return str(result.get("error", "Unknown error occurred"))
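The net effect: `call_llm` now delegates to `invoke_intelligence` but still guarantees a plain-string return, so legacy callers keep working unchanged:

```python
from dacp.llm import call_llm

# Legacy wrapper: builds an OpenAI config (temperature 0.7, max_tokens 150)
# and always returns a str; a dict error response is flattened to its
# "error" message. Requires OPENAI_API_KEY in the environment.
text = call_llm("What is DACP?")
print(text)
```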
|