praisonaiagents 0.0.141__py3-none-any.whl → 0.0.142__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +15 -5
- praisonaiagents/llm/llm.py +45 -8
- praisonaiagents/telemetry/__init__.py +2 -2
- praisonaiagents/telemetry/telemetry.py +2 -2
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.142.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.142.dist-info}/RECORD +8 -8
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.142.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.141.dist-info → praisonaiagents-0.0.142.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -354,6 +354,8 @@ class Agent:
|
|
354
354
|
self.instructions = instructions
|
355
355
|
# Check for model name in environment variable if not provided
|
356
356
|
self._using_custom_llm = False
|
357
|
+
# Flag to track if final result has been displayed to prevent duplicates
|
358
|
+
self._final_display_shown = False
|
357
359
|
|
358
360
|
# Store OpenAI client parameters for lazy initialization
|
359
361
|
self._openai_api_key = api_key
|
@@ -1173,18 +1175,23 @@ Your Goal: {self.goal}"""
|
|
1173
1175
|
task_description=task_description,
|
1174
1176
|
task_id=task_id
|
1175
1177
|
)
|
1176
|
-
#
|
1177
|
-
|
1178
|
+
# Always display final interaction when verbose is True to ensure consistent formatting
|
1179
|
+
# This ensures both OpenAI and custom LLM providers (like Gemini) show formatted output
|
1180
|
+
if self.verbose and not self._final_display_shown:
|
1178
1181
|
display_interaction(prompt, response, markdown=self.markdown,
|
1179
1182
|
generation_time=generation_time, console=self.console,
|
1180
1183
|
agent_name=self.name,
|
1181
1184
|
agent_role=self.role,
|
1182
1185
|
agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
|
1183
|
-
task_name=
|
1184
|
-
task_description=
|
1185
|
-
task_id=
|
1186
|
+
task_name=None, # Not available in this context
|
1187
|
+
task_description=None, # Not available in this context
|
1188
|
+
task_id=None) # Not available in this context
|
1189
|
+
self._final_display_shown = True
|
1186
1190
|
|
1187
1191
|
def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
|
1192
|
+
# Reset the final display flag for each new conversation
|
1193
|
+
self._final_display_shown = False
|
1194
|
+
|
1188
1195
|
# Log all parameter values when in debug mode
|
1189
1196
|
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
|
1190
1197
|
param_info = {
|
@@ -1533,6 +1540,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1533
1540
|
|
1534
1541
|
async def achat(self, prompt: str, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
|
1535
1542
|
"""Async version of chat method with self-reflection support."""
|
1543
|
+
# Reset the final display flag for each new conversation
|
1544
|
+
self._final_display_shown = False
|
1545
|
+
|
1536
1546
|
# Log all parameter values when in debug mode
|
1537
1547
|
if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
|
1538
1548
|
param_info = {
|
praisonaiagents/llm/llm.py
CHANGED
@@ -329,16 +329,29 @@ class LLM:
|
|
329
329
|
# For Ollama, always generate summary when we have tool results
|
330
330
|
# This prevents infinite loops caused by empty/minimal responses
|
331
331
|
|
332
|
-
# Build tool summary
|
333
|
-
|
334
|
-
|
332
|
+
# Build tool summary more naturally to match OpenAI-style responses
|
333
|
+
if len(tool_results) == 1:
|
334
|
+
# Single tool result - create natural response
|
335
|
+
result = tool_results[0]
|
335
336
|
if isinstance(result, dict) and 'result' in result:
|
336
|
-
|
337
|
-
summary_lines.append(f"- {function_name}: {result['result']}")
|
337
|
+
return str(result['result'])
|
338
338
|
else:
|
339
|
-
|
340
|
-
|
341
|
-
|
339
|
+
return str(result)
|
340
|
+
else:
|
341
|
+
# Multiple tool results - create coherent summary
|
342
|
+
summary_lines = []
|
343
|
+
for i, result in enumerate(tool_results):
|
344
|
+
if isinstance(result, dict) and 'result' in result:
|
345
|
+
function_name = result.get('function_name', 'Tool')
|
346
|
+
summary_lines.append(f"{function_name}: {result['result']}")
|
347
|
+
else:
|
348
|
+
summary_lines.append(f"Tool {i+1}: {result}")
|
349
|
+
|
350
|
+
# Create more natural summary text
|
351
|
+
if len(summary_lines) == 2:
|
352
|
+
return f"{summary_lines[0]}. {summary_lines[1]}."
|
353
|
+
else:
|
354
|
+
return "Based on the tool execution: " + ". ".join(summary_lines) + "."
|
342
355
|
|
343
356
|
def _format_ollama_tool_result_message(self, function_name: str, tool_result: Any) -> Dict[str, str]:
|
344
357
|
"""
|
@@ -1173,6 +1186,7 @@ class LLM:
|
|
1173
1186
|
final_response_text = response_text.strip()
|
1174
1187
|
break
|
1175
1188
|
|
1189
|
+
|
1176
1190
|
# Special handling for Ollama to prevent infinite loops
|
1177
1191
|
# Only generate summary after multiple iterations to allow sequential execution
|
1178
1192
|
should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
|
@@ -1198,6 +1212,17 @@ class LLM:
|
|
1198
1212
|
continue
|
1199
1213
|
else:
|
1200
1214
|
# No tool calls, we're done with this iteration
|
1215
|
+
|
1216
|
+
# Special early stopping logic for Ollama when tool results are available
|
1217
|
+
# Ollama often provides empty responses after successful tool execution
|
1218
|
+
if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
|
1219
|
+
(not response_text or response_text.strip() == "")):
|
1220
|
+
# Generate coherent response from tool results
|
1221
|
+
tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
|
1222
|
+
if tool_summary:
|
1223
|
+
final_response_text = tool_summary
|
1224
|
+
break
|
1225
|
+
|
1201
1226
|
# If we've executed tools in previous iterations, this response contains the final answer
|
1202
1227
|
if iteration_count > 0 and not final_response_text:
|
1203
1228
|
final_response_text = response_text.strip() if response_text else ""
|
@@ -1956,6 +1981,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1956
1981
|
final_response_text = response_text.strip()
|
1957
1982
|
break
|
1958
1983
|
|
1984
|
+
|
1959
1985
|
# Special handling for Ollama to prevent infinite loops
|
1960
1986
|
# Only generate summary after multiple iterations to allow sequential execution
|
1961
1987
|
should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
|
@@ -1981,6 +2007,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
|
|
1981
2007
|
continue
|
1982
2008
|
else:
|
1983
2009
|
# No tool calls, we're done with this iteration
|
2010
|
+
|
2011
|
+
# Special early stopping logic for Ollama when tool results are available
|
2012
|
+
# Ollama often provides empty responses after successful tool execution
|
2013
|
+
if (self._is_ollama_provider() and accumulated_tool_results and iteration_count >= 1 and
|
2014
|
+
(not response_text or response_text.strip() == "")):
|
2015
|
+
# Generate coherent response from tool results
|
2016
|
+
tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
|
2017
|
+
if tool_summary:
|
2018
|
+
final_response_text = tool_summary
|
2019
|
+
break
|
2020
|
+
|
1984
2021
|
# If we've executed tools in previous iterations, this response contains the final answer
|
1985
2022
|
if iteration_count > 0 and not final_response_text:
|
1986
2023
|
final_response_text = response_text.strip()
|
@@ -65,8 +65,8 @@ def _ensure_atexit():
|
|
65
65
|
])
|
66
66
|
|
67
67
|
if not telemetry_disabled:
|
68
|
-
# Register atexit handler to flush telemetry on exit
|
69
|
-
atexit.register(lambda: get_telemetry().flush())
|
68
|
+
# Register atexit handler to properly shutdown telemetry on exit
|
69
|
+
atexit.register(lambda: get_telemetry().shutdown())
|
70
70
|
_atexit_registered = True
|
71
71
|
|
72
72
|
def _initialize_telemetry():
|
@@ -354,8 +354,8 @@ class TelemetryCollector:
|
|
354
354
|
pass
|
355
355
|
|
356
356
|
def stop(self):
|
357
|
-
"""Stop telemetry collection and flush."""
|
358
|
-
self.telemetry.flush()
|
357
|
+
"""Stop telemetry collection and properly shutdown."""
|
358
|
+
self.telemetry.shutdown()
|
359
359
|
|
360
360
|
def trace_agent_execution(self, agent_name: str, **attributes):
|
361
361
|
"""Compatibility method for agent execution tracking."""
|
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
|
|
3
3
|
praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
|
4
4
|
praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
|
5
5
|
praisonaiagents/agent/__init__.py,sha256=FkjW6f3EU8heQ9tvctfLbOWV9_dOXmS1PcFNgcStns8,403
|
6
|
-
praisonaiagents/agent/agent.py,sha256=
|
6
|
+
praisonaiagents/agent/agent.py,sha256=1wI2Ohp9evUza7qlZt3yIu4goroR8Jm8EIenXaOpDso,125805
|
7
7
|
praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
|
8
8
|
praisonaiagents/agent/image_agent.py,sha256=Bbwg_h3qhjhG7gMH8sdcQXhcOFgE_wSvcdhtqH5f2UM,9145
|
9
9
|
praisonaiagents/agent/router_agent.py,sha256=a_b6w5Ti05gvK80uKGMIcT14fiCTKv8rCQPCWAUfIiE,12713
|
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
|
|
17
17
|
praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
|
18
18
|
praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
|
19
19
|
praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
|
20
|
-
praisonaiagents/llm/llm.py,sha256=
|
20
|
+
praisonaiagents/llm/llm.py,sha256=vBw810jpgjZyVAvHcGAG0-QpdbL5e2DBwjn8qgE5NXc,136663
|
21
21
|
praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
|
22
22
|
praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
|
23
23
|
praisonaiagents/llm/openai_client.py,sha256=EgWjkDjVpnLKCp1gBFjccDGyqR1anOcSYJYCo45fuEI,46046
|
@@ -31,9 +31,9 @@ praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0
|
|
31
31
|
praisonaiagents/process/process.py,sha256=wXKZ2Z26vB9osmVbD5xqkUlUQRvWEpvL8j9hiuiHrQ0,78246
|
32
32
|
praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
|
33
33
|
praisonaiagents/task/task.py,sha256=-EXxw3czWZdAK1WWI6Dvga5CujsItgk9RWYD_CdW47w,24075
|
34
|
-
praisonaiagents/telemetry/__init__.py,sha256=
|
34
|
+
praisonaiagents/telemetry/__init__.py,sha256=x66unQefO4Rt_kAC1CyuRtq2txbJ5pqEXvoaCncha90,3077
|
35
35
|
praisonaiagents/telemetry/integration.py,sha256=8h8TDlPFTbsBmU5rIYNOibJbwEEEWmzS1ENE9uPTvvg,8696
|
36
|
-
praisonaiagents/telemetry/telemetry.py,sha256=
|
36
|
+
praisonaiagents/telemetry/telemetry.py,sha256=jVtUjXDRvRLIZa9TiAHdTNec08pqpYJLx26_CiiDXSc,13469
|
37
37
|
praisonaiagents/tools/README.md,sha256=am9mlHp46sC1U9HfyXtX-E_cckxpazprl4tuVFYHP_0,4905
|
38
38
|
praisonaiagents/tools/__init__.py,sha256=9NYh9anzJZlaLtrRINdM1uD6JfNSuOzZAFMaarO6yAU,9321
|
39
39
|
praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
|
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
|
|
57
57
|
praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
|
58
58
|
praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
|
59
59
|
praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
|
60
|
-
praisonaiagents-0.0.
|
61
|
-
praisonaiagents-0.0.
|
62
|
-
praisonaiagents-0.0.
|
63
|
-
praisonaiagents-0.0.
|
60
|
+
praisonaiagents-0.0.142.dist-info/METADATA,sha256=LlCoVIYbTPQrfhx1mkLuxt1-PGYAdWLo-DRMjm8xiMk,1673
|
61
|
+
praisonaiagents-0.0.142.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
|
62
|
+
praisonaiagents-0.0.142.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
|
63
|
+
praisonaiagents-0.0.142.dist-info/RECORD,,
|
File without changes
|
File without changes
|