praisonaiagents 0.0.119__tar.gz → 0.0.120__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/PKG-INFO +1 -1
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agent/agent.py +14 -5
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agents/autoagents.py +65 -23
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/llm/llm.py +211 -157
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/pyproject.toml +1 -1
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/README.md +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/setup.cfg +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.119 → praisonaiagents-0.0.120}/tests/test_posthog_fixed.py +0 -0
praisonaiagents/agent/agent.py

@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
         # Add to chat history and return raw response
         self.chat_history.append({"role": "user", "content": original_prompt})
         self.chat_history.append({"role": "assistant", "content": response_text})
-        if
+        # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+        if self.verbose and not self._using_custom_llm:
             display_interaction(original_prompt, response_text, markdown=self.markdown,
                                 generation_time=time.time() - start_time, console=self.console)
         return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
         self.chat_history.append({"role": "assistant", "content": response_text})
         if self.verbose:
             logging.debug(f"Agent {self.name} final response: {response_text}")
-
+        # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+        if self.verbose and not self._using_custom_llm:
+            display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
         # Return only reasoning content if reasoning_steps is True
         if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
             # Apply guardrail to reasoning content
@@ -1279,7 +1282,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             # Return the original response without reflection
             self.chat_history.append({"role": "user", "content": prompt})
             self.chat_history.append({"role": "assistant", "content": response_text})
-
+            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+            if self.verbose and not self._using_custom_llm:
+                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
             return response_text

         reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
@@ -1302,7 +1307,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
             self.chat_history.append({"role": "user", "content": prompt})
             self.chat_history.append({"role": "assistant", "content": response_text})
-
+            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+            if self.verbose and not self._using_custom_llm:
+                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
             # Apply guardrail validation after satisfactory reflection
             try:
                 validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
             self.chat_history.append({"role": "user", "content": prompt})
             self.chat_history.append({"role": "assistant", "content": response_text})
-
+            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+            if self.verbose and not self._using_custom_llm:
+                display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
             # Apply guardrail validation after max reflections
             try:
                 validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
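All five agent.py hunks above apply the same guard: the agent only renders the interaction itself when it owns the output, because a custom LLM wrapper already displays it and rendering twice produces duplicate console output. A minimal, self-contained sketch of that pattern (the `display_interaction` below is a plain print stand-in for the rich console helper, not the real implementation):

```python
import time

def display_interaction(prompt: str, response: str, generation_time: float) -> None:
    # Stand-in for praisonaiagents' rich display helper.
    print(f"[{generation_time:.2f}s] {prompt!r} -> {response!r}")

class MiniAgent:
    def __init__(self, verbose: bool = True, using_custom_llm: bool = False):
        self.verbose = verbose
        self._using_custom_llm = using_custom_llm
        self.chat_history = []

    def record(self, prompt: str, response_text: str, start_time: float) -> str:
        self.chat_history.append({"role": "user", "content": prompt})
        self.chat_history.append({"role": "assistant", "content": response_text})
        # Only display the interaction if not using a custom LLM (which prints
        # its own output) and verbose is enabled -- this avoids double output.
        if self.verbose and not self._using_custom_llm:
            display_interaction(prompt, response_text, generation_time=time.time() - start_time)
        return response_text

MiniAgent(using_custom_llm=True).record("hi", "hello", time.time())   # stays silent
MiniAgent(using_custom_llm=False).record("hi", "hello", time.time())  # prints once
```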
praisonaiagents/agents/autoagents.py

@@ -13,7 +13,8 @@ import logging
 import os
 from pydantic import BaseModel, ConfigDict
 from ..main import display_instruction, display_tool_call, display_interaction
-from ..llm import get_openai_client
+from ..llm import get_openai_client, LLM
+import json

 # Define Pydantic models for structured output
 class TaskConfig(BaseModel):
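The new `LLM` and `json` imports feed the routing added in the next hunk: models whose names look like OpenAI models keep using the native structured-output endpoint, while every other provider goes through the generic `LLM` class and plain JSON parsing. A rough sketch of just that routing decision, with hypothetical helper names (`is_openai_model`, `choose_config_path`) used purely for illustration:

```python
from typing import Optional

def is_openai_model(model: Optional[str]) -> bool:
    # Mirrors the prefix check in the diff: only gpt-*, o1-* and o3-* models
    # are sent to OpenAI's structured-output endpoint.
    return bool(model) and model.startswith(("gpt-", "o1-", "o3-"))

def choose_config_path(model: Optional[str]) -> str:
    if is_openai_model(model):
        # e.g. client.beta.chat.completions.parse(..., response_format=AutoAgentsConfig)
        return "openai structured output"
    # Any other provider: call LLM(...).response(...) and parse the JSON text.
    return "generic LLM + JSON parsing"

print(choose_config_path("gpt-4o-mini"))            # openai structured output
print(choose_config_path("gemini/gemini-1.5-pro"))  # generic LLM + JSON parsing
```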
@@ -238,33 +239,74 @@ Return the configuration in a structured JSON format matching the AutoAgentsConfig
         """

         try:
-            #
+            # Try to use OpenAI's structured output if available
+            use_openai_structured = False
+            client = None
+
             try:
-                … (not shown in the diff source)
+                # Check if we have OpenAI API and the model supports structured output
+                if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+                    client = get_openai_client()
+                    use_openai_structured = True
+            except:
+                # If OpenAI client is not available, we'll use the LLM class
+                pass
+
+            if use_openai_structured and client:
+                # Use OpenAI's structured output for OpenAI models (backward compatibility)
+                response = client.beta.chat.completions.parse(
+                    model=self.llm,
+                    response_format=AutoAgentsConfig,
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+                        {"role": "user", "content": prompt}
+                    ]
+                )
+                config = response.choices[0].message.parsed
+            else:
+                # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+                llm_instance = LLM(
+                    model=self.llm,
+                    base_url=self.base_url,
+                    api_key=self.api_key
+                )

-                … (not shown in the diff source)
+                response_text = llm_instance.response(
+                    prompt=prompt,
+                    system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+                    output_pydantic=AutoAgentsConfig,
+                    temperature=0.7,
+                    stream=False,
+                    verbose=False
+                )
+
+                # Parse the JSON response
+                try:
+                    # First try to parse as is
+                    config_dict = json.loads(response_text)
+                    config = AutoAgentsConfig(**config_dict)
+                except json.JSONDecodeError:
+                    # If that fails, try to extract JSON from the response
+                    # Handle cases where the model might wrap JSON in markdown blocks
+                    cleaned_response = response_text.strip()
+                    if cleaned_response.startswith("```json"):
+                        cleaned_response = cleaned_response[7:]
+                    if cleaned_response.startswith("```"):
+                        cleaned_response = cleaned_response[3:]
+                    if cleaned_response.endswith("```"):
+                        cleaned_response = cleaned_response[:-3]
+                    cleaned_response = cleaned_response.strip()
+
+                    config_dict = json.loads(cleaned_response)
+                    config = AutoAgentsConfig(**config_dict)

             # Ensure we have exactly max_agents number of agents
-            if len(
-
-            elif len(
-                logging.warning(f"Generated {len(
+            if len(config.agents) > self.max_agents:
+                config.agents = config.agents[:self.max_agents]
+            elif len(config.agents) < self.max_agents:
+                logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")

-            return
+            return config
         except Exception as e:
             logging.error(f"Error generating configuration: {e}")
             raise
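The fallback branch above has to tolerate models that wrap their JSON answer in a markdown code fence. A self-contained sketch of that cleanup-and-parse step; `AgentsConfig` here is a simplified stand-in for the real `AutoAgentsConfig` model:

```python
import json
from pydantic import BaseModel

class AgentsConfig(BaseModel):
    # Simplified stand-in for AutoAgentsConfig.
    main_task: str
    agents: list[str]

def parse_config(response_text: str) -> AgentsConfig:
    try:
        # First try to parse the response as-is.
        return AgentsConfig(**json.loads(response_text))
    except json.JSONDecodeError:
        # Strip a possible ```json ... ``` wrapper, then parse again.
        cleaned = response_text.strip()
        if cleaned.startswith("```json"):
            cleaned = cleaned[7:]
        if cleaned.startswith("```"):
            cleaned = cleaned[3:]
        if cleaned.endswith("```"):
            cleaned = cleaned[:-3]
        return AgentsConfig(**json.loads(cleaned.strip()))

wrapped = '```json\n{"main_task": "research topic", "agents": ["Researcher", "Writer"]}\n```'
print(parse_config(wrapped))
```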
praisonaiagents/llm/llm.py

@@ -87,6 +87,65 @@ class LLM:
         "llama-3.2-90b-text-preview": 6144  # 8,192 actual
     }

+    def _log_llm_config(self, method_name: str, **config):
+        """Centralized debug logging for LLM configuration and parameters.
+
+        Args:
+            method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
+            **config: Configuration parameters to log
+        """
+        # Check for debug logging - either global debug level OR explicit verbose mode
+        verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
+        should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
+
+        if should_log:
+            # Mask sensitive information
+            safe_config = config.copy()
+            if 'api_key' in safe_config:
+                safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
+            if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
+                safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
+
+            # Handle special formatting for certain fields
+            if 'prompt' in safe_config:
+                prompt = safe_config['prompt']
+                # Convert to string first for consistent logging behavior
+                prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
+                if len(prompt_str) > 100:
+                    safe_config['prompt'] = prompt_str[:100] + "..."
+                else:
+                    safe_config['prompt'] = prompt_str
+            if 'system_prompt' in safe_config:
+                sp = safe_config['system_prompt']
+                if sp and isinstance(sp, str) and len(sp) > 100:
+                    safe_config['system_prompt'] = sp[:100] + "..."
+            if 'chat_history' in safe_config:
+                ch = safe_config['chat_history']
+                safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
+            if 'tools' in safe_config:
+                tools = safe_config['tools']
+                # Check if tools is iterable before processing
+                if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+                    safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+                else:
+                    safe_config['tools'] = None
+            if 'output_json' in safe_config:
+                oj = safe_config['output_json']
+                safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
+            if 'output_pydantic' in safe_config:
+                op = safe_config['output_pydantic']
+                safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
+
+            # Log based on method name - check more specific conditions first
+            if method_name == '__init__':
+                logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
+            elif "parameters" in method_name:
+                logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
+            elif "_async" in method_name:
+                logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
+            else:
+                logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")
+
     def __init__(
         self,
         model: str,
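`_log_llm_config` only emits output when the root logger is at DEBUG or the caller passed a numeric `verbose` of 10 or more, and it masks the API key and truncates long prompts before dumping the config as JSON. A trimmed, standalone sketch of that gating and masking logic (not the full helper):

```python
import json
import logging

def log_llm_config(method_name: str, verbose=False, **config):
    # Log only when the root logger is at DEBUG, or verbose is a number >= 10
    # (a plain verbose=True is not enough, matching the check in the diff).
    debug_on = logging.getLogger().getEffectiveLevel() == logging.DEBUG
    verbose_on = not isinstance(verbose, bool) and verbose >= 10
    if not (debug_on or verbose_on):
        return
    safe = dict(config, verbose=verbose)
    if safe.get("api_key") is not None:
        safe["api_key"] = "***"  # never write the real key to the log
    prompt = safe.get("prompt")
    if isinstance(prompt, str) and len(prompt) > 100:
        safe["prompt"] = prompt[:100] + "..."  # keep log lines readable
    logging.debug(f"{method_name} configuration: {json.dumps(safe, indent=2, default=str)}")

logging.basicConfig(level=logging.DEBUG)
log_llm_config("get_response", verbose=10, model="gpt-4o-mini",
               api_key="sk-secret", prompt="Summarise the release notes")
```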
@@ -181,35 +240,34 @@ class LLM:
         litellm.modify_params = True
         self._setup_event_tracking(events)

-        # Log all initialization parameters when in debug mode
-        … (not shown in the diff source)
-            logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
+        # Log all initialization parameters when in debug mode or verbose >= 10
+        self._log_llm_config(
+            '__init__',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps,
+            extra_settings=self.extra_settings
+        )

     def _is_ollama_provider(self) -> bool:
         """Detect if this is an Ollama provider regardless of naming convention"""
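With the inline debug dictionaries removed, surfacing the same information is a matter of enabling DEBUG logging or passing a numeric verbosity before constructing the client. A hypothetical usage sketch, assuming only the `model` and `verbose` parameters that the diff itself logs:

```python
import logging
from praisonaiagents.llm import LLM

logging.basicConfig(level=logging.DEBUG)    # global DEBUG level enables the dump, or
llm = LLM(model="gpt-4o-mini", verbose=10)  # an explicit verbose >= 10 does too

# __init__ now logs "LLM instance initialized with: {...}" with api_key masked.
```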
@@ -530,54 +588,53 @@ class LLM:
         """Enhanced get_response with all OpenAI-like features"""
         logging.info(f"Getting response from {self.model}")
         # Log all self values when in debug mode
-        … (not shown in the diff source)
-        logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'LLM instance',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps
+        )
+
+        # Log the parameter values passed to get_response
+        self._log_llm_config(
+            'get_response parameters',
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            temperature=temperature,
+            tools=tools,
+            output_json=output_json,
+            output_pydantic=output_pydantic,
+            verbose=verbose,
+            markdown=markdown,
+            self_reflect=self_reflect,
+            max_reflect=max_reflect,
+            min_reflect=min_reflect,
+            agent_name=agent_name,
+            agent_role=agent_role,
+            agent_tools=agent_tools,
+            kwargs=str(kwargs)
+        )
         try:
             import litellm
             # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -1186,54 +1243,53 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         import litellm
         logging.info(f"Getting async response from {self.model}")
         # Log all self values when in debug mode
-        … (not shown in the diff source)
-        logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'get_response_async',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps
+        )
+
+        # Log the parameter values passed to get_response_async
+        self._log_llm_config(
+            'get_response_async parameters',
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            temperature=temperature,
+            tools=tools,
+            output_json=output_json,
+            output_pydantic=output_pydantic,
+            verbose=verbose,
+            markdown=markdown,
+            self_reflect=self_reflect,
+            max_reflect=max_reflect,
+            min_reflect=min_reflect,
+            agent_name=agent_name,
+            agent_role=agent_role,
+            agent_tools=agent_tools,
+            kwargs=str(kwargs)
+        )
         reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
         litellm.set_verbose = False

@@ -1867,22 +1923,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         logger.debug("Using synchronous response function")

         # Log all self values when in debug mode
-        … (not shown in the diff source)
-        logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'Response method',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            stream=stream,
+            verbose=verbose,
+            markdown=markdown,
+            kwargs=str(kwargs)
+        )

         # Build messages list using shared helper (simplified version without JSON output)
         messages, _ = self._build_messages(
@@ -1968,22 +2023,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         logger.debug("Using asynchronous response function")

         # Log all self values when in debug mode
-        … (not shown in the diff source)
-        logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'Async response method',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            stream=stream,
+            verbose=verbose,
+            markdown=markdown,
+            kwargs=str(kwargs)
+        )

         # Build messages list using shared helper (simplified version without JSON output)
         messages, _ = self._build_messages(