praisonaiagents 0.0.140__py3-none-any.whl → 0.0.142__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +47 -34
- praisonaiagents/llm/llm.py +114 -34
- praisonaiagents/telemetry/__init__.py +2 -2
- praisonaiagents/telemetry/telemetry.py +2 -2
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
```diff
@@ -354,6 +354,8 @@ class Agent:
         self.instructions = instructions
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False
+        # Flag to track if final result has been displayed to prevent duplicates
+        self._final_display_shown = False

         # Store OpenAI client parameters for lazy initialization
         self._openai_api_key = api_key
@@ -653,7 +655,7 @@ Your Goal: {self.goal}
                 error=f"Agent guardrail validation error: {str(e)}"
             )

-    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None, task_name=None, task_description=None, task_id=None):
         """Apply guardrail validation with retry logic.

         Args:
@@ -707,7 +709,7 @@ Your Goal: {self.goal}
             # Regenerate response for retry
             try:
                 retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
-                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools, task_name=task_name, task_description=task_description, task_id=task_id)
                 if response and response.choices:
                     current_response = response.choices[0].message.content.strip()
                 else:
@@ -1072,7 +1074,7 @@ Your Goal: {self.goal}"""
                 reasoning_steps=reasoning_steps
             )

-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")

@@ -1128,11 +1130,6 @@ Your Goal: {self.goal}"""
                 )
             else:
                 # Use the standard OpenAI client approach with tool support
-                def custom_display_fn(text, start_time):
-                    if self.verbose:
-                        return display_generating(text, start_time)
-                    return ""
-
                 # Note: openai_client expects tools in various formats and will format them internally
                 # But since we already have formatted_tools, we can pass them directly
                 if self._openai_client is None:
@@ -1145,8 +1142,8 @@ Your Goal: {self.goal}"""
                     tools=formatted_tools, # Already formatted for OpenAI
                     execute_tool_fn=self.execute_tool,
                     stream=stream,
-                    console=self.console if self.verbose else None,
-                    display_fn=display_generating if stream
+                    console=self.console if (self.verbose or stream) else None,
+                    display_fn=display_generating if stream else None,
                     reasoning_steps=reasoning_steps,
                     verbose=self.verbose,
                     max_iterations=10
@@ -1158,7 +1155,7 @@ Your Goal: {self.goal}"""
             display_error(f"Error in chat completion: {e}")
             return None

-    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
         """Helper method to execute callbacks and display interaction.

         This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,12 +1171,13 @@ Your Goal: {self.goal}"""
             agent_name=self.name,
             agent_role=self.role,
             agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-            task_name=
-            task_description=
-            task_id=
+            task_name=task_name,
+            task_description=task_description,
+            task_id=task_id
         )
-        #
-
+        # Always display final interaction when verbose is True to ensure consistent formatting
+        # This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
+        if self.verbose and not self._final_display_shown:
            display_interaction(prompt, response, markdown=self.markdown,
                              generation_time=generation_time, console=self.console,
                              agent_name=self.name,
@@ -1188,8 +1186,12 @@ Your Goal: {self.goal}"""
                              task_name=None, # Not available in this context
                              task_description=None, # Not available in this context
                              task_id=None) # Not available in this context
+            self._final_display_shown = True

     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
+        # Reset the final display flag for each new conversation
+        self._final_display_shown = False
+
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
```
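The `_final_display_shown` flag introduced above is a simple duplicate-display guard: it is reset at the start of each `chat()`/`achat()` call and set once the final interaction has been rendered, so verbose output appears only once per conversation even when guardrail retries and callbacks both reach the display helper. A minimal standalone sketch of that pattern (the class below is illustrative, not the library's `Agent`):

```python
# Illustrative sketch of the duplicate-display guard; not the praisonaiagents Agent class.
class DisplayGuardDemo:
    def __init__(self):
        self._final_display_shown = False  # same flag name as in the diff

    def chat(self, prompt: str) -> str:
        self._final_display_shown = False  # reset per conversation, as chat()/achat() now do
        response = f"echo: {prompt}"
        self._display(prompt, response)
        self._display(prompt, response)  # a second call is silently skipped
        return response

    def _display(self, prompt: str, response: str) -> None:
        if not self._final_display_shown:
            print(f"[interaction] {prompt} -> {response}")
            self._final_display_shown = True  # mark as shown to prevent duplicates

if __name__ == "__main__":
    DisplayGuardDemo().chat("hello")  # prints the interaction exactly once
```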
```diff
@@ -1297,7 +1299,7 @@ Your Goal: {self.goal}"""

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1357,7 +1359,7 @@ Your Goal: {self.goal}"""
                 agent_tools=agent_tools
             )

-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream, task_name=task_name, task_description=task_description, task_id=task_id)
             if not response:
                 # Rollback chat history on response failure
                 self.chat_history = self.chat_history[:chat_history_length]
@@ -1372,9 +1374,9 @@ Your Goal: {self.goal}"""
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation even for JSON output
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1391,9 +1393,9 @@ Your Goal: {self.goal}"""
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                     # Apply guardrail to reasoning content
                     try:
-                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
                         return validated_reasoning
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1402,9 +1404,9 @@ Your Goal: {self.goal}"""
                         return None
                 # Apply guardrail to regular response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1426,7 +1428,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if self._using_custom_llm or self._openai_client is None:
                     # For custom LLMs, we need to handle reflection differently
                     # Use non-streaming to get complete JSON response
-                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False, task_name=task_name, task_description=task_description, task_id=task_id)

                     if not reflection_response or not reflection_response.choices:
                         raise Exception("No response from reflection request")
@@ -1470,9 +1472,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after satisfactory reflection
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1488,9 +1490,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after max reflections
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1503,7 +1505,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                 # For custom LLMs during reflection, always use non-streaming to ensure complete responses
                 use_stream = self.stream if not self._using_custom_llm else False
-                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream, task_name=task_name, task_description=task_description, task_id=task_id)
                 response_text = response.choices[0].message.content.strip()
                 reflection_count += 1
                 continue # Continue the loop for more reflections
@@ -1538,6 +1540,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

     async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         """Async version of chat method with self-reflection support."""
+        # Reset the final display flag for each new conversation
+        self._final_display_shown = False
+
         # Log all parameter values when in debug mode
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             param_info = {
@@ -1620,7 +1625,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1697,6 +1704,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                         total_time = time.time() - start_time
                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    # Execute callback after tool completion
+                    self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
                     return result
                 elif output_json or output_pydantic:
                     response = await self._openai_client.async_client.chat.completions.create(
@@ -1705,11 +1714,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         temperature=temperature,
                         response_format={"type": "json_object"}
                     )
-
+                    response_text = response.choices[0].message.content
                     if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                         total_time = time.time() - start_time
                         logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-
+                    # Execute callback after JSON/Pydantic completion
+                    self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+                    return response_text
                 else:
                     response = await self._openai_client.async_client.chat.completions.create(
                         model=self.llm,
@@ -1804,7 +1815,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

                 # Apply guardrail validation for OpenAI client response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
```
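Net effect of the agent.py changes: the task metadata that `chat()` and `achat()` already accepted is now threaded through `_chat_completion`, guardrail retries, and `_execute_callback_and_display`, so callbacks receive the real task context instead of `None`. A hedged usage sketch, assuming the package's usual top-level import and a constructor that takes `instructions`; only the `chat()` keyword arguments are taken directly from this diff:

```python
from praisonaiagents import Agent  # assumed import path; the diff only covers agent.py internals

agent = Agent(instructions="You are a concise research assistant.")  # assumed constructor usage

# chat() already accepted these keyword arguments (see its signature earlier in the diff);
# as of 0.0.142 they are also forwarded to guardrail retries and display callbacks.
reply = agent.chat(
    "Summarize the latest findings on solid-state batteries.",
    task_name="battery-summary",
    task_description="Summarize recent solid-state battery research",
    task_id="task-001",
)
print(reply)
```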
praisonaiagents/llm/llm.py
CHANGED
```diff
@@ -329,16 +329,29 @@ class LLM:
         # For Ollama, always generate summary when we have tool results
         # This prevents infinite loops caused by empty/minimal responses

-        # Build tool summary
-
-
+        # Build tool summary more naturally to match OpenAI-style responses
+        if len(tool_results) == 1:
+            # Single tool result - create natural response
+            result = tool_results[0]
             if isinstance(result, dict) and 'result' in result:
-
-                summary_lines.append(f"- {function_name}: {result['result']}")
+                return str(result['result'])
             else:
-
-
-
+                return str(result)
+        else:
+            # Multiple tool results - create coherent summary
+            summary_lines = []
+            for i, result in enumerate(tool_results):
+                if isinstance(result, dict) and 'result' in result:
+                    function_name = result.get('function_name', 'Tool')
+                    summary_lines.append(f"{function_name}: {result['result']}")
+                else:
+                    summary_lines.append(f"Tool {i+1}: {result}")
+
+            # Create more natural summary text
+            if len(summary_lines) == 2:
+                return f"{summary_lines[0]}. {summary_lines[1]}."
+            else:
+                return "Based on the tool execution: " + ". ".join(summary_lines) + "."

     def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
         """
```
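The rewritten Ollama tool-summary logic returns a single tool result verbatim and only stitches a combined summary for multiple results. A standalone re-implementation for illustration, assuming tool results shaped like `{'function_name': ..., 'result': ...}` as the diff does; this mirrors, but is not, the library's `_generate_ollama_tool_summary`:

```python
from typing import Any, List

def summarize_tool_results(tool_results: List[Any]) -> str:
    """Illustrative mirror of the 0.0.142 summary shaping; not the library function."""
    if len(tool_results) == 1:
        result = tool_results[0]
        # A single result is returned as-is so it reads like a normal model reply.
        return str(result['result']) if isinstance(result, dict) and 'result' in result else str(result)

    summary_lines = []
    for i, result in enumerate(tool_results):
        if isinstance(result, dict) and 'result' in result:
            summary_lines.append(f"{result.get('function_name', 'Tool')}: {result['result']}")
        else:
            summary_lines.append(f"Tool {i+1}: {result}")
    if len(summary_lines) == 2:
        return f"{summary_lines[0]}. {summary_lines[1]}."
    return "Based on the tool execution: " + ". ".join(summary_lines) + "."

# Example outputs:
print(summarize_tool_results([{"function_name": "get_weather", "result": "22°C and sunny"}]))
# -> 22°C and sunny
print(summarize_tool_results([
    {"function_name": "get_weather", "result": "22°C and sunny"},
    {"function_name": "get_time", "result": "14:05"},
]))
# -> get_weather: 22°C and sunny. get_time: 14:05.
```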
```diff
@@ -477,6 +490,49 @@ class LLM:
             logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
             return arguments

+    def _handle_ollama_sequential_logic(self, iteration_count: int, accumulated_tool_results: List[Any],
+                                        response_text: str, messages: List[Dict]) -> tuple:
+        """
+        Handle Ollama sequential tool execution logic to prevent premature tool summary generation.
+
+        This method implements the two-step process:
+        1. After reaching threshold with tool results, add explicit final answer prompt
+        2. Only generate tool summary if LLM still doesn't respond after explicit prompt
+
+        Args:
+            iteration_count: Current iteration count
+            accumulated_tool_results: List of tool results from all iterations
+            response_text: Current LLM response text
+            messages: Message history list to potentially modify
+
+        Returns:
+            tuple: (should_break, final_response_text, iteration_count)
+            - should_break: Whether to break the iteration loop
+            - final_response_text: Text to use as final response (None if continuing)
+            - iteration_count: Updated iteration count
+        """
+        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
+            return False, None, iteration_count
+
+        # For Ollama: if we have meaningful tool results but empty responses,
+        # give LLM one final chance with explicit prompt for final answer
+        if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+            # Add explicit prompt asking for final answer
+            messages.append({
+                "role": "user",
+                "content": self.OLLAMA_FINAL_ANSWER_PROMPT
+            })
+            # Continue to next iteration to get the final response
+            iteration_count += 1
+            return False, None, iteration_count
+        else:
+            # If still no response after final answer prompt, generate summary
+            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+            if tool_summary:
+                return True, tool_summary, iteration_count
+
+            return False, None, iteration_count
+
     def _needs_system_message_skip(self) -> bool:
         """Check if this model requires skipping system messages"""
         if not self.model:
@@ -1130,13 +1186,18 @@ class LLM:
                             final_response_text = response_text.strip()
                             break

+
                         # Special handling for Ollama to prevent infinite loops
                         # Only generate summary after multiple iterations to allow sequential execution
-
-
-
-
-
+                        should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                            iteration_count, accumulated_tool_results, response_text, messages
+                        )
+                        if should_break:
+                            final_response_text = tool_summary_text
+                            break
+                        elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                            # Continue iteration after adding final answer prompt
+                            continue

                         # Safety check: prevent infinite loops for any provider
                         if iteration_count >= 5:
@@ -1151,6 +1212,17 @@ class LLM:
                             continue
                         else:
                             # No tool calls, we're done with this iteration
+
+                            # Special early stopping logic for Ollama when tool results are available
+                            # Ollama often provides empty responses after successful tool execution
+                            if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+                                (not response_text or response_text.strip() == "")):
+                                # Generate coherent response from tool results
+                                tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                                if tool_summary:
+                                    final_response_text = tool_summary
+                                    break
+
                             # If we've executed tools in previous iterations, this response contains the final answer
                             if iteration_count > 0 and not final_response_text:
                                 final_response_text = response_text.strip() if response_text else ""
```
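Together, the new `_handle_ollama_sequential_logic` helper and the early-stopping block give Ollama two chances before falling back to a synthesized reply: at the iteration threshold an explicit final-answer prompt is appended, and only if the response is still empty is the tool summary used. A self-contained simulation of that control flow; the threshold value, prompt text, and function below are placeholders, not the library's `OLLAMA_SUMMARY_ITERATION_THRESHOLD`, `OLLAMA_FINAL_ANSWER_PROMPT`, or methods:

```python
OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3  # placeholder value; the real constant lives on the LLM class
OLLAMA_FINAL_ANSWER_PROMPT = "Please provide your final answer based on the tool results above."  # placeholder text

def run_sequential_tool_loop(model_replies, tool_results, max_iterations=5):
    """Simulate the two-step Ollama fallback described in the diff (illustrative only)."""
    messages = []
    iteration_count = 0
    while iteration_count < max_iterations:
        response_text = model_replies[min(iteration_count, len(model_replies) - 1)]
        if response_text.strip():
            return response_text  # the model produced a final answer on its own
        if tool_results and iteration_count == OLLAMA_SUMMARY_ITERATION_THRESHOLD:
            # Step 1: ask explicitly for a final answer before synthesizing one
            messages.append({"role": "user", "content": OLLAMA_FINAL_ANSWER_PROMPT})
        elif tool_results and iteration_count > OLLAMA_SUMMARY_ITERATION_THRESHOLD:
            # Step 2: still no text, so fall back to a summary built from the tool results
            return "Based on the tool execution: " + ". ".join(map(str, tool_results)) + "."
        iteration_count += 1
    return ""

print(run_sequential_tool_loop(["", "", "", "", ""], ["get_weather: 22°C and sunny"]))
# -> Based on the tool execution: get_weather: 22°C and sunny.
```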
```diff
@@ -1909,13 +1981,18 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             final_response_text = response_text.strip()
                             break

+
                         # Special handling for Ollama to prevent infinite loops
                         # Only generate summary after multiple iterations to allow sequential execution
-
-
-
-
-
+                        should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                            iteration_count, accumulated_tool_results, response_text, messages
+                        )
+                        if should_break:
+                            final_response_text = tool_summary_text
+                            break
+                        elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                            # Continue iteration after adding final answer prompt
+                            continue

                         # Safety check: prevent infinite loops for any provider
                         if iteration_count >= 5:
@@ -1930,6 +2007,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             continue
                         else:
                             # No tool calls, we're done with this iteration
+
+                            # Special early stopping logic for Ollama when tool results are available
+                            # Ollama often provides empty responses after successful tool execution
+                            if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
+                                (not response_text or response_text.strip() == "")):
+                                # Generate coherent response from tool results
+                                tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+                                if tool_summary:
+                                    final_response_text = tool_summary
+                                    break
+
                             # If we've executed tools in previous iterations, this response contains the final answer
                             if iteration_count > 0 and not final_response_text:
                                 final_response_text = response_text.strip()
@@ -2417,18 +2505,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             )

             if stream:
-
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        for chunk in litellm.completion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     for chunk in litellm.completion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = litellm.completion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
@@ -2517,18 +2601,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             )

             if stream:
-
-                    with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                        async for chunk in await litellm.acompletion(**completion_params):
-                            content = self._process_streaming_chunk(chunk)
-                            if content:
-                                response_text += content
-                                live.update(display_generating(response_text, start_time))
-                else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     async for chunk in await litellm.acompletion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                        if content:
+                            response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
```
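Both streaming branches now funnel every chunk through a single rich `Live` display refreshed from `display_generating`. A minimal sketch of that rendering pattern, assuming a dummy chunk generator and a plain `Panel` in place of `litellm.completion(stream=True)` and the library's `display_generating`:

```python
import time

from rich.console import Console
from rich.live import Live
from rich.panel import Panel

def fake_chunks():
    # Stand-in for litellm.completion(..., stream=True)
    for word in ["Streaming ", "tokens ", "into ", "a ", "single ", "Live ", "panel."]:
        time.sleep(0.2)
        yield word

def generating_panel(text: str, start_time: float) -> Panel:
    # Stand-in for display_generating(): partial text plus elapsed time
    return Panel(f"{text}\n[{time.time() - start_time:.1f}s elapsed]", title="Generating...")

console = Console()
start_time = time.time()
response_text = ""
with Live(generating_panel(response_text, start_time), console=console, refresh_per_second=4) as live:
    for chunk in fake_chunks():
        response_text += chunk  # accumulate the streamed content
        live.update(generating_panel(response_text, start_time))
print(response_text)
```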
praisonaiagents/telemetry/__init__.py
CHANGED
```diff
@@ -65,8 +65,8 @@ def _ensure_atexit():
    ])

    if not telemetry_disabled:
-        # Register atexit handler to
-        atexit.register(lambda: get_telemetry().
+        # Register atexit handler to properly shutdown telemetry on exit
+        atexit.register(lambda: get_telemetry().shutdown())
        _atexit_registered = True

def _initialize_telemetry():
```
praisonaiagents/telemetry/telemetry.py
CHANGED
```diff
@@ -354,8 +354,8 @@ class TelemetryCollector:
        pass

    def stop(self):
-        """Stop telemetry collection and
-        self.telemetry.
+        """Stop telemetry collection and properly shutdown."""
+        self.telemetry.shutdown()

    def trace_agent_execution(self, agent_name: str, **attributes):
        """Compatibility method for agent execution tracking."""
```
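Both telemetry fixes complete previously truncated statements so that a real `shutdown()` call is registered with `atexit` and invoked by `TelemetryCollector.stop()`. A standalone sketch of the register-once pattern, assuming a stub telemetry object in place of the library's:

```python
import atexit

class _StubTelemetry:
    """Stand-in for the library's telemetry object."""
    def shutdown(self) -> None:
        print("telemetry: flushed and shut down")

_telemetry = _StubTelemetry()
_atexit_registered = False

def get_telemetry() -> _StubTelemetry:
    return _telemetry

def _ensure_atexit() -> None:
    """Register the shutdown hook once, mirroring _ensure_atexit() in the diff."""
    global _atexit_registered
    if not _atexit_registered:
        atexit.register(lambda: get_telemetry().shutdown())
        _atexit_registered = True

_ensure_atexit()
_ensure_atexit()  # second call is a no-op; shutdown() still runs exactly once at interpreter exit
```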
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/RECORD
CHANGED
```diff
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=FkjW6f3EU8heQ9tvctfLbOWV9_dOXmS1PcFNgcStns8,403
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=1wI2Ohp9evUza7qlZt3yIu4goroR8Jm8EIenXaOpDso,125805
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=Bbwg_h3qhjhG7gMH8sdcQXhcOFgE_wSvcdhtqH5f2UM,9145
 praisonaiagents/agent/router_agent.py,sha256=a_b6w5Ti05gvK80uKGMIcT14fiCTKv8rCQPCWAUfIiE,12713
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=vBw810jpgjZyVAvHcGAG0-QpdbL5e2DBwjn8qgE5NXc,136663
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=EgWjkDjVpnLKCp1gBFjccDGyqR1anOcSYJYCo45fuEI,46046
@@ -31,9 +31,9 @@ praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0
 praisonaiagents/process/process.py,sha256=wXKZ2Z26vB9osmVbD5xqkUlUQRvWEpvL8j9hiuiHrQ0,78246
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=-EXxw3czWZdAK1WWI6Dvga5CujsItgk9RWYD_CdW47w,24075
-praisonaiagents/telemetry/__init__.py,sha256=
+praisonaiagents/telemetry/__init__.py,sha256=x66unQefO4Rt_kAC1CyuRtq2txbJ5pqEXvoaCncha90,3077
 praisonaiagents/telemetry/integration.py,sha256=8h8TDlPFTbsBmU5rIYNOibJbwEEEWmzS1ENE9uPTvvg,8696
-praisonaiagents/telemetry/telemetry.py,sha256=
+praisonaiagents/telemetry/telemetry.py,sha256=jVtUjXDRvRLIZa9TiAHdTNec08pqpYJLx26_CiiDXSc,13469
 praisonaiagents/tools/README.md,sha256=am9mlHp46sC1U9HfyXtX-E_cckxpazprl4tuVFYHP_0,4905
 praisonaiagents/tools/__init__.py,sha256=9NYh9anzJZlaLtrRINdM1uD6JfNSuOzZAFMaarO6yAU,9321
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.142.dist-info/METADATA,sha256=LlCoVIYbTPQrfhx1mkLuxt1-PGYAdWLo-DRMjm8xiMk,1673
+praisonaiagents-0.0.142.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.142.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.142.dist-info/RECORD,,
```
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.142.dist-info}/top_level.txt
File without changes