praisonaiagents 0.0.140__py3-none-any.whl → 0.0.141__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +38 -35
- praisonaiagents/llm/llm.py +69 -26
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/RECORD +6 -6
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -653,7 +653,7 @@ Your Goal: {self.goal}
                 error=f"Agent guardrail validation error: {str(e)}"
             )
 
-    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None, task_name=None, task_description=None, task_id=None):
         """Apply guardrail validation with retry logic.
 
         Args:
@@ -707,7 +707,7 @@ Your Goal: {self.goal}
                 # Regenerate response for retry
                 try:
                     retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
-                    response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                    response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools, task_name=task_name, task_description=task_description, task_id=task_id)
                     if response and response.choices:
                         current_response = response.choices[0].message.content.strip()
                     else:
@@ -1072,7 +1072,7 @@ Your Goal: {self.goal}"""
             reasoning_steps=reasoning_steps
         )
 
-    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False):
+    def _chat_completion(self, messages, temperature=0.2, tools=None, stream=True, reasoning_steps=False, task_name=None, task_description=None, task_id=None):
         start_time = time.time()
         logging.debug(f"{self.name} sending messages to LLM: {messages}")
 
@@ -1128,11 +1128,6 @@ Your Goal: {self.goal}"""
                 )
             else:
                 # Use the standard OpenAI client approach with tool support
-                def custom_display_fn(text, start_time):
-                    if self.verbose:
-                        return display_generating(text, start_time)
-                    return ""
-
                 # Note: openai_client expects tools in various formats and will format them internally
                 # But since we already have formatted_tools, we can pass them directly
                 if self._openai_client is None:
@@ -1145,8 +1140,8 @@ Your Goal: {self.goal}"""
                     tools=formatted_tools, # Already formatted for OpenAI
                     execute_tool_fn=self.execute_tool,
                     stream=stream,
-                    console=self.console if self.verbose else None,
-                    display_fn=display_generating if stream
+                    console=self.console if (self.verbose or stream) else None,
+                    display_fn=display_generating if stream else None,
                     reasoning_steps=reasoning_steps,
                     verbose=self.verbose,
                     max_iterations=10
@@ -1158,7 +1153,7 @@ Your Goal: {self.goal}"""
             display_error(f"Error in chat completion: {e}")
             return None
 
-    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float):
+    def _execute_callback_and_display(self, prompt: str, response: str, generation_time: float, task_name=None, task_description=None, task_id=None):
         """Helper method to execute callbacks and display interaction.
 
         This centralizes the logic for callback execution and display to avoid duplication.
@@ -1174,9 +1169,9 @@ Your Goal: {self.goal}"""
             agent_name=self.name,
             agent_role=self.role,
             agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-            task_name=
-            task_description=
-            task_id=
+            task_name=task_name,
+            task_description=task_description,
+            task_id=task_id
         )
         # Only display interaction if not using custom LLM (to avoid double output) and verbose is True
         if self.verbose and not self._using_custom_llm:
@@ -1185,9 +1180,9 @@ Your Goal: {self.goal}"""
                 agent_name=self.name,
                 agent_role=self.role,
                 agent_tools=[t.__name__ for t in self.tools] if self.tools else None,
-                task_name=
-                task_description=
-                task_id=
+                task_name=task_name,
+                task_description=task_description,
+                task_id=task_id)
 
     def chat(self, prompt, temperature=0.2, tools=None, output_json=None, output_pydantic=None, reasoning_steps=False, stream=True, task_name=None, task_description=None, task_id=None):
         # Log all parameter values when in debug mode
@@ -1297,7 +1292,7 @@ Your Goal: {self.goal}"""
 
                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1357,7 +1352,7 @@ Your Goal: {self.goal}"""
                 agent_tools=agent_tools
             )
 
-            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream)
+            response = self._chat_completion(messages, temperature=temperature, tools=tools if tools else None, reasoning_steps=reasoning_steps, stream=self.stream, task_name=task_name, task_description=task_description, task_id=task_id)
            if not response:
                 # Rollback chat history on response failure
                 self.chat_history = self.chat_history[:chat_history_length]
@@ -1372,9 +1367,9 @@ Your Goal: {self.goal}"""
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation even for JSON output
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for JSON output: {e}")
@@ -1391,9 +1386,9 @@ Your Goal: {self.goal}"""
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
                     # Apply guardrail to reasoning content
                     try:
-                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_reasoning, time.time() - start_time, task_name, task_description, task_id)
                         return validated_reasoning
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
@@ -1402,9 +1397,9 @@ Your Goal: {self.goal}"""
                         return None
                 # Apply guardrail to regular response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                     # Execute callback after validation
-                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                    self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
@@ -1426,7 +1421,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if self._using_custom_llm or self._openai_client is None:
                     # For custom LLMs, we need to handle reflection differently
                     # Use non-streaming to get complete JSON response
-                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False)
+                    reflection_response = self._chat_completion(messages, temperature=temperature, tools=None, stream=False, reasoning_steps=False, task_name=task_name, task_description=task_description, task_id=task_id)
 
                     if not reflection_response or not reflection_response.choices:
                         raise Exception("No response from reflection request")
@@ -1470,9 +1465,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after satisfactory reflection
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
@@ -1488,9 +1483,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     # Apply guardrail validation after max reflections
                     try:
-                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                        validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
                         # Execute callback after validation
-                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time)
+                        self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                         return validated_response
                     except Exception as e:
                         logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
@@ -1503,7 +1498,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
                 # For custom LLMs during reflection, always use non-streaming to ensure complete responses
                 use_stream = self.stream if not self._using_custom_llm else False
-                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream)
+                response = self._chat_completion(messages, temperature=temperature, tools=None, stream=use_stream, task_name=task_name, task_description=task_description, task_id=task_id)
                 response_text = response.choices[0].message.content.strip()
                 reflection_count += 1
                 continue # Continue the loop for more reflections
@@ -1620,7 +1615,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
                 # Apply guardrail validation for custom LLM response
                 try:
-                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools, task_name, task_description, task_id)
+                    # Execute callback after validation
+                    self._execute_callback_and_display(normalized_content, validated_response, time.time() - start_time, task_name, task_description, task_id)
                     return validated_response
                 except Exception as e:
                     logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
@@ -1697,6 +1694,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                # Execute callback after tool completion
+                self._execute_callback_and_display(original_prompt, result, time.time() - start_time, task_name, task_description, task_id)
                 return result
             elif output_json or output_pydantic:
                 response = await self._openai_client.async_client.chat.completions.create(
@@ -1705,11 +1704,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     temperature=temperature,
                     response_format={"type": "json_object"}
                 )
-
+                response_text = response.choices[0].message.content
                 if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
-
+                # Execute callback after JSON/Pydantic completion
+                self._execute_callback_and_display(original_prompt, response_text, time.time() - start_time, task_name, task_description, task_id)
+                return response_text
             else:
                 response = await self._openai_client.async_client.chat.completions.create(
                     model=self.llm,
@@ -1804,7 +1805,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
             # Apply guardrail validation for OpenAI client response
             try:
-                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools, task_name, task_description, task_id)
+                # Execute callback after validation
+                self._execute_callback_and_display(original_prompt, validated_response, time.time() - start_time, task_name, task_description, task_id)
                 return validated_response
             except Exception as e:
                 logging.error(f"Agent {self.name}: Guardrail validation failed for OpenAI client: {e}")
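Taken together, the agent.py hunks follow one pattern: the optional task metadata (task_name, task_description, task_id) that chat() and achat() already accepted is now threaded through the internal helpers (_chat_completion, _apply_guardrail_with_retry, _execute_callback_and_display) so that interaction callbacks receive task context instead of blank fields. The sketch below shows only that threading pattern in isolation; MiniAgent and its on_interaction hook are illustrative stand-ins, not the praisonaiagents API.

from typing import Callable, Optional

class MiniAgent:
    """Toy agent showing how optional task metadata is passed down to the callback layer."""

    def __init__(self, on_interaction: Optional[Callable] = None):
        self.on_interaction = on_interaction  # hypothetical callback hook

    def chat(self, prompt: str, task_name=None, task_description=None, task_id=None) -> str:
        # Public entry point accepts the metadata and forwards it to every helper.
        response = self._completion(prompt, task_name, task_description, task_id)
        return self._validate(response, prompt, task_name, task_description, task_id)

    def _completion(self, prompt: str, task_name, task_description, task_id) -> str:
        return f"echo: {prompt}"  # placeholder for the real LLM call

    def _validate(self, response: str, prompt: str, task_name, task_description, task_id) -> str:
        # Whether validation succeeds or retries, the callback still sees the task context.
        self._callback(prompt, response, task_name, task_description, task_id)
        return response

    def _callback(self, prompt, response, task_name, task_description, task_id):
        if self.on_interaction:
            self.on_interaction(prompt=prompt, response=response, task_name=task_name,
                                task_description=task_description, task_id=task_id)

agent = MiniAgent(on_interaction=lambda **kw: print(kw["task_id"], kw["response"]))
agent.chat("hello", task_name="demo", task_id="T-1")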
praisonaiagents/llm/llm.py
CHANGED
@@ -477,6 +477,49 @@ class LLM:
             logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
             return arguments
 
+    def _handle_ollama_sequential_logic(self, iteration_count: int, accumulated_tool_results: List[Any],
+                                        response_text: str, messages: List[Dict]) -> tuple:
+        """
+        Handle Ollama sequential tool execution logic to prevent premature tool summary generation.
+
+        This method implements the two-step process:
+        1. After reaching threshold with tool results, add explicit final answer prompt
+        2. Only generate tool summary if LLM still doesn't respond after explicit prompt
+
+        Args:
+            iteration_count: Current iteration count
+            accumulated_tool_results: List of tool results from all iterations
+            response_text: Current LLM response text
+            messages: Message history list to potentially modify
+
+        Returns:
+            tuple: (should_break, final_response_text, iteration_count)
+                - should_break: Whether to break the iteration loop
+                - final_response_text: Text to use as final response (None if continuing)
+                - iteration_count: Updated iteration count
+        """
+        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
+            return False, None, iteration_count
+
+        # For Ollama: if we have meaningful tool results but empty responses,
+        # give LLM one final chance with explicit prompt for final answer
+        if accumulated_tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+            # Add explicit prompt asking for final answer
+            messages.append({
+                "role": "user",
+                "content": self.OLLAMA_FINAL_ANSWER_PROMPT
+            })
+            # Continue to next iteration to get the final response
+            iteration_count += 1
+            return False, None, iteration_count
+        else:
+            # If still no response after final answer prompt, generate summary
+            tool_summary = self._generate_ollama_tool_summary(accumulated_tool_results, response_text)
+            if tool_summary:
+                return True, tool_summary, iteration_count
+
+            return False, None, iteration_count
+
     def _needs_system_message_skip(self) -> bool:
         """Check if this model requires skipping system messages"""
         if not self.model:
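The new helper centralizes logic that previously lived inline in the tool-execution loops (see the next two hunks): at the threshold iteration it appends one explicit final-answer prompt (OLLAMA_FINAL_ANSWER_PROMPT) and keeps iterating, and only if the model still returns no text does it fall back to a generated tool summary. The toy driver below walks through that two-step flow; the constants, stub methods, and values are simplified placeholders for illustration, not the real LLM class.

from typing import Any, Dict, List

class ToyOllamaLoop:
    # Stand-in constants; the real values live on the LLM class.
    OLLAMA_SUMMARY_ITERATION_THRESHOLD = 3
    OLLAMA_FINAL_ANSWER_PROMPT = "Please provide your final answer using the tool results above."

    def _is_ollama_provider(self) -> bool:
        return True  # pretend we are talking to an Ollama model

    def _generate_ollama_tool_summary(self, tool_results: List[Any], response_text: str) -> str:
        return "Summary of tool results: " + ", ".join(map(str, tool_results))

    def handle(self, iteration_count: int, tool_results: List[Any],
               response_text: str, messages: List[Dict]) -> tuple:
        # Mirrors the two-step logic: first an explicit final-answer prompt,
        # then (and only then) a generated tool summary.
        if not (self._is_ollama_provider() and iteration_count >= self.OLLAMA_SUMMARY_ITERATION_THRESHOLD):
            return False, None, iteration_count
        if tool_results and iteration_count == self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
            messages.append({"role": "user", "content": self.OLLAMA_FINAL_ANSWER_PROMPT})
            return False, None, iteration_count + 1
        summary = self._generate_ollama_tool_summary(tool_results, response_text)
        return (True, summary, iteration_count) if summary else (False, None, iteration_count)

loop = ToyOllamaLoop()
messages: List[Dict] = []
print(loop.handle(3, ["42"], "", messages))  # step 1: final-answer prompt appended, keep looping
print(loop.handle(4, ["42"], "", messages))  # step 2: still no text, so summarize and break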
@@ -1132,11 +1175,15 @@ class LLM:
 
                     # Special handling for Ollama to prevent infinite loops
                     # Only generate summary after multiple iterations to allow sequential execution
-
-
-
-
-
+                    should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                        iteration_count, accumulated_tool_results, response_text, messages
+                    )
+                    if should_break:
+                        final_response_text = tool_summary_text
+                        break
+                    elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                        # Continue iteration after adding final answer prompt
+                        continue
 
                     # Safety check: prevent infinite loops for any provider
                     if iteration_count >= 5:
@@ -1911,11 +1958,15 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
                     # Special handling for Ollama to prevent infinite loops
                     # Only generate summary after multiple iterations to allow sequential execution
-
-
-
-
-
+                    should_break, tool_summary_text, iteration_count = self._handle_ollama_sequential_logic(
+                        iteration_count, accumulated_tool_results, response_text, messages
+                    )
+                    if should_break:
+                        final_response_text = tool_summary_text
+                        break
+                    elif tool_summary_text is None and iteration_count > self.OLLAMA_SUMMARY_ITERATION_THRESHOLD:
+                        # Continue iteration after adding final answer prompt
+                        continue
 
                     # Safety check: prevent infinite loops for any provider
                     if iteration_count >= 5:
@@ -2417,18 +2468,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             )
 
             if stream:
-
-                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    for chunk in litellm.completion(**completion_params):
-                        content = self._process_streaming_chunk(chunk)
-                        if content:
-                            response_text += content
-                            live.update(display_generating(response_text, start_time))
-            else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     for chunk in litellm.completion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                    if content:
+                        response_text += content
             else:
                 response = litellm.completion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
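The synchronous hunk above and the asynchronous hunk below both consolidate streaming display into a single branch rendered through rich's Live, instead of separate verbose and non-verbose paths. A minimal standalone sketch of that display pattern is shown here (it assumes the rich package is installed); the fake token generator stands in for the litellm stream, and this display_generating is a simplified placeholder for the library's helper of the same name.

import time
from rich.console import Console
from rich.live import Live
from rich.text import Text

def display_generating(text: str, start_time: float) -> Text:
    # Simplified stand-in: show elapsed time plus the text streamed so far.
    elapsed = time.time() - start_time
    return Text(f"Generating... {elapsed:.1f}s\n{text}")

def fake_stream():
    # Stand-in for chunks yielded by a streaming completion call.
    for token in ["Hello", ", ", "world", "!"]:
        time.sleep(0.2)
        yield token

console = Console()
start_time = time.time()
response_text = ""
with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
    for content in fake_stream():
        response_text += content
        live.update(display_generating(response_text, start_time))
print(response_text)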
@@ -2517,18 +2564,14 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             )
 
             if stream:
-
-                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    async for chunk in await litellm.acompletion(**completion_params):
-                        content = self._process_streaming_chunk(chunk)
-                        if content:
-                            response_text += content
-                            live.update(display_generating(response_text, start_time))
-            else:
+                with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
                     async for chunk in await litellm.acompletion(**completion_params):
                         content = self._process_streaming_chunk(chunk)
                         if content:
                             response_text += content
+                            live.update(display_generating(response_text, start_time))
+                    if content:
+                        response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
                 response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/RECORD
CHANGED
@@ -3,7 +3,7 @@ praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9
 praisonaiagents/main.py,sha256=b5dKlkf6NMeumSzixreHB9ui90f8YMAi5r1fCbTpQVw,17225
 praisonaiagents/session.py,sha256=FHWButPBaFGA4x1U_2gImroQChHnFy231_aAa_n5KOQ,20364
 praisonaiagents/agent/__init__.py,sha256=FkjW6f3EU8heQ9tvctfLbOWV9_dOXmS1PcFNgcStns8,403
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=mtI8Y_OWHQ_zLpIq5LWuuqYhYr5ipzEUsWSlSm2hN00,125235
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=Bbwg_h3qhjhG7gMH8sdcQXhcOFgE_wSvcdhtqH5f2UM,9145
 praisonaiagents/agent/router_agent.py,sha256=a_b6w5Ti05gvK80uKGMIcT14fiCTKv8rCQPCWAUfIiE,12713
@@ -17,7 +17,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=-di_h9HxXQfAhTMMerhK16tfw8DtUndp44TGkBOzkZs,15539
 praisonaiagents/llm/__init__.py,sha256=tHvWq5mv4K4MhWr0s6rqox8UnJ5RK0kXhYuD40WkZQA,1747
-praisonaiagents/llm/llm.py,sha256=
+praisonaiagents/llm/llm.py,sha256=JJ-MT09nC9h5NSYeBTG0LoLmeODNiyRGq09-VgLW1Ok,134482
 praisonaiagents/llm/model_capabilities.py,sha256=cxOvZcjZ_PIEpUYKn3S2FMyypfOSfbGpx4vmV7Y5vhI,3967
 praisonaiagents/llm/model_router.py,sha256=Jy2pShlkLxqXF3quz-MRB3-6L9vaUSgUrf2YJs_Tsg0,13995
 praisonaiagents/llm/openai_client.py,sha256=EgWjkDjVpnLKCp1gBFjccDGyqR1anOcSYJYCo45fuEI,46046
@@ -57,7 +57,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.141.dist-info/METADATA,sha256=QTjOqWcXdAooalUe3_TtFp2NyMXuj_cof83ZiOxFPtA,1673
+praisonaiagents-0.0.141.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.141.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.141.dist-info/RECORD,,
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.140.dist-info → praisonaiagents-0.0.141.dist-info}/top_level.txt
File without changes