praisonaiagents 0.0.118__tar.gz → 0.0.120__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/PKG-INFO +1 -1
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/agent.py +14 -5
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/autoagents.py +65 -23
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/llm.py +256 -219
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/pyproject.toml +1 -1
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/README.md +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/setup.cfg +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/tests/test_posthog_fixed.py +0 -0
{praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agent/agent.py

@@ -1233,7 +1233,8 @@ Your Goal: {self.goal}
             # Add to chat history and return raw response
             self.chat_history.append({"role": "user", "content": original_prompt})
             self.chat_history.append({"role": "assistant", "content": response_text})
-            if
+            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+            if self.verbose and not self._using_custom_llm:
                 display_interaction(original_prompt, response_text, markdown=self.markdown,
                                     generation_time=time.time() - start_time, console=self.console)
             return response_text
@@ -1243,7 +1244,9 @@ Your Goal: {self.goal}
             self.chat_history.append({"role": "assistant", "content": response_text})
             if self.verbose:
                 logging.debug(f"Agent {self.name} final response: {response_text}")
-
+            # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+            if self.verbose and not self._using_custom_llm:
+                display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
             # Return only reasoning content if reasoning_steps is True
             if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                 # Apply guardrail to reasoning content
@@ -1279,7 +1282,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 # Return the original response without reflection
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
-
+                # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+                if self.verbose and not self._using_custom_llm:
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 return response_text

             reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
@@ -1302,7 +1307,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     display_self_reflection("Agent marked the response as satisfactory after meeting minimum reflections", console=self.console)
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
-
+                # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+                if self.verbose and not self._using_custom_llm:
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Apply guardrail validation after satisfactory reflection
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
@@ -1317,7 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     display_self_reflection("Maximum reflection count reached, returning current response", console=self.console)
                 self.chat_history.append({"role": "user", "content": prompt})
                 self.chat_history.append({"role": "assistant", "content": response_text})
-
+                # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
+                if self.verbose and not self._using_custom_llm:
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Apply guardrail validation after max reflections
                 try:
                     validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
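All five agent.py hunks above introduce the same display guard. A minimal sketch of that gate, assuming only the two flags visible in the hunks (the standalone helper below is illustrative and not part of the package):

    def should_display(verbose: bool, using_custom_llm: bool) -> bool:
        # Display the interaction only when verbose is on and the agent is not
        # delegating to a custom LLM wrapper that already prints its own output.
        return verbose and not using_custom_llm

    assert should_display(True, False) is True    # OpenAI-client path: display here
    assert should_display(True, True) is False    # custom LLM path: avoid double output
    assert should_display(False, False) is False  # non-verbose: never display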
{praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/agents/autoagents.py

@@ -13,7 +13,8 @@ import logging
 import os
 from pydantic import BaseModel, ConfigDict
 from ..main import display_instruction, display_tool_call, display_interaction
-from ..llm import get_openai_client
+from ..llm import get_openai_client, LLM
+import json

 # Define Pydantic models for structured output
 class TaskConfig(BaseModel):
@@ -238,33 +239,74 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
         """

         try:
-            #
+            # Try to use OpenAI's structured output if available
+            use_openai_structured = False
+            client = None
+
             try:
-                [… 8 removed lines omitted …]
+                # Check if we have OpenAI API and the model supports structured output
+                if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
+                    client = get_openai_client()
+                    use_openai_structured = True
+            except:
+                # If OpenAI client is not available, we'll use the LLM class
+                pass
+
+            if use_openai_structured and client:
+                # Use OpenAI's structured output for OpenAI models (backward compatibility)
+                response = client.beta.chat.completions.parse(
+                    model=self.llm,
+                    response_format=AutoAgentsConfig,
+                    messages=[
+                        {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
+                        {"role": "user", "content": prompt}
+                    ]
+                )
+                config = response.choices[0].message.parsed
+            else:
+                # Use LLM class for all other providers (Gemini, Anthropic, etc.)
+                llm_instance = LLM(
+                    model=self.llm,
+                    base_url=self.base_url,
+                    api_key=self.api_key
+                )

-                [… 8 removed lines omitted …]
+                response_text = llm_instance.response(
+                    prompt=prompt,
+                    system_prompt="You are a helpful assistant designed to generate AI agent configurations.",
+                    output_pydantic=AutoAgentsConfig,
+                    temperature=0.7,
+                    stream=False,
+                    verbose=False
+                )
+
+                # Parse the JSON response
+                try:
+                    # First try to parse as is
+                    config_dict = json.loads(response_text)
+                    config = AutoAgentsConfig(**config_dict)
+                except json.JSONDecodeError:
+                    # If that fails, try to extract JSON from the response
+                    # Handle cases where the model might wrap JSON in markdown blocks
+                    cleaned_response = response_text.strip()
+                    if cleaned_response.startswith("```json"):
+                        cleaned_response = cleaned_response[7:]
+                    if cleaned_response.startswith("```"):
+                        cleaned_response = cleaned_response[3:]
+                    if cleaned_response.endswith("```"):
+                        cleaned_response = cleaned_response[:-3]
+                    cleaned_response = cleaned_response.strip()
+
+                    config_dict = json.loads(cleaned_response)
+                    config = AutoAgentsConfig(**config_dict)

             # Ensure we have exactly max_agents number of agents
-            if len(
-            [… 1 removed line omitted …]
-            elif len(
-                logging.warning(f"Generated {len(
+            if len(config.agents) > self.max_agents:
+                config.agents = config.agents[:self.max_agents]
+            elif len(config.agents) < self.max_agents:
+                logging.warning(f"Generated {len(config.agents)} agents, expected {self.max_agents}")

-            return
+            return config
         except Exception as e:
             logging.error(f"Error generating configuration: {e}")
             raise
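In the non-OpenAI branch above, the raw model output is parsed into AutoAgentsConfig, stripping markdown code fences when present. A standalone sketch of that fallback parsing, with a simplified Pydantic model standing in for AutoAgentsConfig (illustrative only, not part of the package):

    import json
    from pydantic import BaseModel

    class ExampleConfig(BaseModel):  # hypothetical stand-in for AutoAgentsConfig
        name: str

    def parse_structured_response(response_text: str) -> ExampleConfig:
        # First try the text as-is, then retry after stripping ```json fences.
        try:
            return ExampleConfig(**json.loads(response_text))
        except json.JSONDecodeError:
            cleaned = response_text.strip()
            if cleaned.startswith("```json"):
                cleaned = cleaned[7:]
            if cleaned.startswith("```"):
                cleaned = cleaned[3:]
            if cleaned.endswith("```"):
                cleaned = cleaned[:-3]
            return ExampleConfig(**json.loads(cleaned.strip()))

    print(parse_structured_response('```json\n{"name": "Researcher"}\n```'))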
{praisonaiagents-0.0.118 → praisonaiagents-0.0.120}/praisonaiagents/llm/llm.py

@@ -87,6 +87,65 @@ class LLM:
         "llama-3.2-90b-text-preview": 6144  # 8,192 actual
     }

+    def _log_llm_config(self, method_name: str, **config):
+        """Centralized debug logging for LLM configuration and parameters.
+
+        Args:
+            method_name: The name of the method calling this logger (e.g., '__init__', 'get_response')
+            **config: Configuration parameters to log
+        """
+        # Check for debug logging - either global debug level OR explicit verbose mode
+        verbose = config.get('verbose', self.verbose if hasattr(self, 'verbose') else False)
+        should_log = logging.getLogger().getEffectiveLevel() == logging.DEBUG or (not isinstance(verbose, bool) and verbose >= 10)
+
+        if should_log:
+            # Mask sensitive information
+            safe_config = config.copy()
+            if 'api_key' in safe_config:
+                safe_config['api_key'] = "***" if safe_config['api_key'] is not None else None
+            if 'extra_settings' in safe_config and isinstance(safe_config['extra_settings'], dict):
+                safe_config['extra_settings'] = {k: v for k, v in safe_config['extra_settings'].items() if k not in ["api_key"]}
+
+            # Handle special formatting for certain fields
+            if 'prompt' in safe_config:
+                prompt = safe_config['prompt']
+                # Convert to string first for consistent logging behavior
+                prompt_str = str(prompt) if not isinstance(prompt, str) else prompt
+                if len(prompt_str) > 100:
+                    safe_config['prompt'] = prompt_str[:100] + "..."
+                else:
+                    safe_config['prompt'] = prompt_str
+            if 'system_prompt' in safe_config:
+                sp = safe_config['system_prompt']
+                if sp and isinstance(sp, str) and len(sp) > 100:
+                    safe_config['system_prompt'] = sp[:100] + "..."
+            if 'chat_history' in safe_config:
+                ch = safe_config['chat_history']
+                safe_config['chat_history'] = f"[{len(ch)} messages]" if ch else None
+            if 'tools' in safe_config:
+                tools = safe_config['tools']
+                # Check if tools is iterable before processing
+                if tools and hasattr(tools, '__iter__') and not isinstance(tools, str):
+                    safe_config['tools'] = [t.__name__ if hasattr(t, "__name__") else str(t) for t in tools]
+                else:
+                    safe_config['tools'] = None
+            if 'output_json' in safe_config:
+                oj = safe_config['output_json']
+                safe_config['output_json'] = str(oj.__class__.__name__) if oj else None
+            if 'output_pydantic' in safe_config:
+                op = safe_config['output_pydantic']
+                safe_config['output_pydantic'] = str(op.__class__.__name__) if op else None
+
+            # Log based on method name - check more specific conditions first
+            if method_name == '__init__':
+                logging.debug(f"LLM instance initialized with: {json.dumps(safe_config, indent=2, default=str)}")
+            elif "parameters" in method_name:
+                logging.debug(f"{method_name}: {json.dumps(safe_config, indent=2, default=str)}")
+            elif "_async" in method_name:
+                logging.debug(f"LLM async instance configuration: {json.dumps(safe_config, indent=2, default=str)}")
+            else:
+                logging.debug(f"{method_name} configuration: {json.dumps(safe_config, indent=2, default=str)}")
+
     def __init__(
         self,
         model: str,
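The new _log_llm_config helper only emits output when the root logger is at DEBUG or when verbose is passed as a numeric level of 10 or more; a plain boolean verbose flag never triggers it on its own. A small sketch of that gate, mirroring the condition in the hunk above (standalone, not part of the package):

    import logging

    def should_log(verbose) -> bool:
        # Log when the root logger is at DEBUG, or when verbose is a
        # non-boolean numeric debug level of 10 or higher.
        return (logging.getLogger().getEffectiveLevel() == logging.DEBUG
                or (not isinstance(verbose, bool) and verbose >= 10))

    logging.basicConfig(level=logging.INFO)
    print(should_log(True))   # False: a boolean verbose flag alone is not enough
    print(should_log(10))     # True: numeric verbosity >= 10 enables debug logging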
@@ -181,35 +240,34 @@ class LLM:
         litellm.modify_params = True
         self._setup_event_tracking(events)

-        # Log all initialization parameters when in debug mode
-        [… 27 removed lines omitted …]
-        logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
+        # Log all initialization parameters when in debug mode or verbose >= 10
+        self._log_llm_config(
+            '__init__',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps,
+            extra_settings=self.extra_settings
+        )

     def _is_ollama_provider(self) -> bool:
         """Detect if this is an Ollama provider regardless of naming convention"""
@@ -229,6 +287,39 @@ class LLM:

         return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)

+    def _process_stream_delta(self, delta, response_text: str, tool_calls: List[Dict], formatted_tools: Optional[List] = None) -> tuple:
+        """
+        Process a streaming delta chunk to extract content and tool calls.
+
+        Args:
+            delta: The delta object from a streaming chunk
+            response_text: The accumulated response text so far
+            tool_calls: The accumulated tool calls list so far
+            formatted_tools: Optional list of formatted tools for tool call support check
+
+        Returns:
+            tuple: (updated_response_text, updated_tool_calls)
+        """
+        # Process content
+        if delta.content:
+            response_text += delta.content
+
+        # Capture tool calls from streaming chunks if provider supports it
+        if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
+            for tc in delta.tool_calls:
+                if tc.index >= len(tool_calls):
+                    tool_calls.append({
+                        "id": tc.id,
+                        "type": "function",
+                        "function": {"name": "", "arguments": ""}
+                    })
+                if tc.function.name:
+                    tool_calls[tc.index]["function"]["name"] = tc.function.name
+                if tc.function.arguments:
+                    tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+
+        return response_text, tool_calls
+
     def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
         """
         Safely parse tool call arguments with proper error handling
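_process_stream_delta consolidates the streaming-delta handling that was previously duplicated across the streaming loops later in this file: it appends streamed content and merges partial tool-call fragments by index. A simplified, self-contained sketch of that accumulation, using SimpleNamespace objects as stand-ins for litellm streaming deltas (illustrative only):

    from types import SimpleNamespace

    def accumulate(delta, response_text, tool_calls):
        # Mirror of the helper's two branches: append content, then merge
        # partial tool-call fragments into the entry at the same index.
        if delta.content:
            response_text += delta.content
        for tc in (delta.tool_calls or []):
            if tc.index >= len(tool_calls):
                tool_calls.append({"id": tc.id, "type": "function",
                                   "function": {"name": "", "arguments": ""}})
            if tc.function.name:
                tool_calls[tc.index]["function"]["name"] = tc.function.name
            if tc.function.arguments:
                tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
        return response_text, tool_calls

    # Two partial chunks of one tool call arriving across the stream.
    chunk1 = SimpleNamespace(content=None, tool_calls=[SimpleNamespace(
        index=0, id="call_1", function=SimpleNamespace(name="search", arguments='{"q": "pra'))])
    chunk2 = SimpleNamespace(content=None, tool_calls=[SimpleNamespace(
        index=0, id=None, function=SimpleNamespace(name=None, arguments='isonai"}'))])

    text, calls = "", []
    for chunk in (chunk1, chunk2):
        text, calls = accumulate(chunk, text, calls)
    print(calls[0]["function"])  # {'name': 'search', 'arguments': '{"q": "praisonai"}'}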
@@ -497,54 +588,53 @@ class LLM:
         """Enhanced get_response with all OpenAI-like features"""
         logging.info(f"Getting response from {self.model}")
         # Log all self values when in debug mode
-        [… 47 removed lines omitted …]
-        logging.debug(f"get_response parameters: {json.dumps(param_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'LLM instance',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps
+        )
+
+        # Log the parameter values passed to get_response
+        self._log_llm_config(
+            'get_response parameters',
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            temperature=temperature,
+            tools=tools,
+            output_json=output_json,
+            output_pydantic=output_pydantic,
+            verbose=verbose,
+            markdown=markdown,
+            self_reflect=self_reflect,
+            max_reflect=max_reflect,
+            min_reflect=min_reflect,
+            agent_name=agent_name,
+            agent_role=agent_role,
+            agent_tools=agent_tools,
+            kwargs=str(kwargs)
+        )
         try:
             import litellm
             # This below **kwargs** is passed to .completion() directly. so reasoning_steps has to be popped. OR find alternate best way of handling this.
@@ -651,23 +741,11 @@ class LLM:
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta:
                             delta = chunk.choices[0].delta
+                            response_text, tool_calls = self._process_stream_delta(
+                                delta, response_text, tool_calls, formatted_tools
+                            )
                             if delta.content:
-                                response_text += delta.content
                                 live.update(display_generating(response_text, current_time))
-
-                            # Capture tool calls from streaming chunks if provider supports it
-                            if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
-                                for tc in delta.tool_calls:
-                                    if tc.index >= len(tool_calls):
-                                        tool_calls.append({
-                                            "id": tc.id,
-                                            "type": "function",
-                                            "function": {"name": "", "arguments": ""}
-                                        })
-                                    if tc.function.name:
-                                        tool_calls[tc.index]["function"]["name"] = tc.function.name
-                                    if tc.function.arguments:
-                                        tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
                 else:
                     # Non-verbose streaming
                     for chunk in litellm.completion(
@@ -681,22 +759,9 @@ class LLM:
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta:
                             delta = chunk.choices[0].delta
-
-                            response_text
-
-                            # Capture tool calls from streaming chunks if provider supports it
-                            if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
-                                for tc in delta.tool_calls:
-                                    if tc.index >= len(tool_calls):
-                                        tool_calls.append({
-                                            "id": tc.id,
-                                            "type": "function",
-                                            "function": {"name": "", "arguments": ""}
-                                        })
-                                    if tc.function.name:
-                                        tool_calls[tc.index]["function"]["name"] = tc.function.name
-                                    if tc.function.arguments:
-                                        tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+                            response_text, tool_calls = self._process_stream_delta(
+                                delta, response_text, tool_calls, formatted_tools
+                            )

                 response_text = response_text.strip()

@@ -1178,54 +1243,53 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         import litellm
         logging.info(f"Getting async response from {self.model}")
         # Log all self values when in debug mode
-        [… 47 removed lines omitted …]
-        logging.debug(f"get_response_async parameters: {json.dumps(param_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'get_response_async',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=self.temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            logit_bias=self.logit_bias,
+            response_format=self.response_format,
+            seed=self.seed,
+            logprobs=self.logprobs,
+            top_logprobs=self.top_logprobs,
+            api_version=self.api_version,
+            stop_phrases=self.stop_phrases,
+            api_key=self.api_key,
+            base_url=self.base_url,
+            verbose=self.verbose,
+            markdown=self.markdown,
+            self_reflect=self.self_reflect,
+            max_reflect=self.max_reflect,
+            min_reflect=self.min_reflect,
+            reasoning_steps=self.reasoning_steps
+        )
+
+        # Log the parameter values passed to get_response_async
+        self._log_llm_config(
+            'get_response_async parameters',
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            temperature=temperature,
+            tools=tools,
+            output_json=output_json,
+            output_pydantic=output_pydantic,
+            verbose=verbose,
+            markdown=markdown,
+            self_reflect=self_reflect,
+            max_reflect=max_reflect,
+            min_reflect=min_reflect,
+            agent_name=agent_name,
+            agent_role=agent_role,
+            agent_tools=agent_tools,
+            kwargs=str(kwargs)
+        )
         reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
         litellm.set_verbose = False

@@ -1297,24 +1361,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta:
                             delta = chunk.choices[0].delta
+                            response_text, tool_calls = self._process_stream_delta(
+                                delta, response_text, tool_calls, formatted_tools
+                            )
                             if delta.content:
-                                response_text += delta.content
                                 print("\033[K", end="\r")
                                 print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
-
-                            # Capture tool calls from streaming chunks if provider supports it
-                            if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
-                                for tc in delta.tool_calls:
-                                    if tc.index >= len(tool_calls):
-                                        tool_calls.append({
-                                            "id": tc.id,
-                                            "type": "function",
-                                            "function": {"name": "", "arguments": ""}
-                                        })
-                                    if tc.function.name:
-                                        tool_calls[tc.index]["function"]["name"] = tc.function.name
-                                    if tc.function.arguments:
-                                        tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
                 else:
                     # Non-verbose streaming
                     async for chunk in await litellm.acompletion(
@@ -1328,22 +1380,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     ):
                         if chunk and chunk.choices and chunk.choices[0].delta:
                             delta = chunk.choices[0].delta
-
-                            response_text
-
-                            # Capture tool calls from streaming chunks if provider supports it
-                            if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
-                                for tc in delta.tool_calls:
-                                    if tc.index >= len(tool_calls):
-                                        tool_calls.append({
-                                            "id": tc.id,
-                                            "type": "function",
-                                            "function": {"name": "", "arguments": ""}
-                                        })
-                                    if tc.function.name:
-                                        tool_calls[tc.index]["function"]["name"] = tc.function.name
-                                    if tc.function.arguments:
-                                        tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+                            response_text, tool_calls = self._process_stream_delta(
+                                delta, response_text, tool_calls, formatted_tools
+                            )

                 response_text = response_text.strip()

@@ -1884,22 +1923,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         logger.debug("Using synchronous response function")

         # Log all self values when in debug mode
-        [… 15 removed lines omitted …]
-        logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'Response method',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            stream=stream,
+            verbose=verbose,
+            markdown=markdown,
+            kwargs=str(kwargs)
+        )

         # Build messages list using shared helper (simplified version without JSON output)
         messages, _ = self._build_messages(
@@ -1985,22 +2023,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
        logger.debug("Using asynchronous response function")

        # Log all self values when in debug mode
-        [… 15 removed lines omitted …]
-        logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
+        self._log_llm_config(
+            'Async response method',
+            model=self.model,
+            timeout=self.timeout,
+            temperature=temperature,
+            top_p=self.top_p,
+            n=self.n,
+            max_tokens=self.max_tokens,
+            presence_penalty=self.presence_penalty,
+            frequency_penalty=self.frequency_penalty,
+            stream=stream,
+            verbose=verbose,
+            markdown=markdown,
+            kwargs=str(kwargs)
+        )

        # Build messages list using shared helper (simplified version without JSON output)
        messages, _ = self._build_messages(