praisonaiagents 0.0.126__tar.gz → 0.0.128__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/PKG-INFO +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agents/agents.py +15 -17
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agents/autoagents.py +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/llm/llm.py +13 -230
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/llm/openai_client.py +4 -78
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/process/process.py +10 -6
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/pyproject.toml +1 -1
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/README.md +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/llm/model_capabilities.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/setup.cfg +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_fix_comprehensive.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_http_stream_basic.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_posthog_fixed.py +0 -0
- {praisonaiagents-0.0.126 → praisonaiagents-0.0.128}/tests/test_validation_feedback.py +0 -0
praisonaiagents/agents/agents.py

@@ -480,24 +480,22 @@ Context:
             )

         if self.process == "workflow":
-
-            parallel_tasks = []
+            tasks_to_run = []
             async for task_id in process.aworkflow():
-                if self.tasks[task_id].async_execution
-
-
-                    #
-
-
-
-
-
-
-
-
-
-                await asyncio.gather(*[self.arun_task(t) for t in parallel_tasks])
+                if self.tasks[task_id].async_execution:
+                    tasks_to_run.append(self.arun_task(task_id))
+                else:
+                    # If we encounter a sync task, we must wait for the previous async tasks to finish.
+                    if tasks_to_run:
+                        await asyncio.gather(*tasks_to_run)
+                        tasks_to_run = []
+
+                    # Run sync task in an executor to avoid blocking the event loop
+                    loop = asyncio.get_event_loop()
+                    await loop.run_in_executor(None, self.run_task, task_id)
+
+            if tasks_to_run:
+                await asyncio.gather(*tasks_to_run)

         elif self.process == "sequential":
             async for task_id in process.asequential():
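The new workflow branch batches asynchronous tasks and flushes them with asyncio.gather whenever a synchronous task is reached, running the sync task in an executor so it does not block the event loop. Below is a minimal, self-contained sketch of that batch-and-flush pattern; the runner functions and task names are illustrative stand-ins, not the package's API.

```python
import asyncio
import time


async def run_async_task(name: str) -> str:
    # Stand-in for an awaitable task runner such as arun_task(task_id)
    await asyncio.sleep(0.1)
    return f"{name} done"


def run_sync_task(name: str) -> str:
    # Stand-in for a blocking task runner such as run_task(task_id)
    time.sleep(0.1)
    return f"{name} done"


async def run_workflow(task_ids, is_async):
    tasks_to_run = []
    loop = asyncio.get_running_loop()
    for task_id in task_ids:
        if is_async[task_id]:
            # Collect async tasks so they can run concurrently
            tasks_to_run.append(run_async_task(task_id))
        else:
            # A sync task acts as a barrier: flush pending async tasks first
            if tasks_to_run:
                await asyncio.gather(*tasks_to_run)
                tasks_to_run = []
            # Run the blocking task in a thread so the event loop stays responsive
            await loop.run_in_executor(None, run_sync_task, task_id)
    # Flush any async tasks queued after the last sync task
    if tasks_to_run:
        await asyncio.gather(*tasks_to_run)


asyncio.run(run_workflow(
    ["a", "b", "c", "d"],
    {"a": True, "b": True, "c": False, "d": True},
))
```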
praisonaiagents/agents/autoagents.py

@@ -136,7 +136,7 @@ class AutoAgents(PraisonAIAgents):
             completion_checker=completion_checker,
             max_retries=max_retries,
             process=process,
-            manager_llm=manager_llm
+            manager_llm=manager_llm or self.llm
         )

     def _display_agents_and_tasks(self, agents: List[Agent], tasks: List[Task]):
praisonaiagents/llm/llm.py

@@ -772,7 +772,7 @@ class LLM:
                             if formatted_tools and self._supports_streaming_tools():
                                 tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

-                    response_text = response_text.strip()
+                    response_text = response_text.strip() if response_text else ""

                     # Create a mock final_response with the captured data
                     final_response = {
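This and the later `.strip()` changes guard against `response_text` being `None` or empty, which can happen when a streamed response carries only tool-call deltas and no text content. A small standalone illustration of the guard, independent of the library:

```python
def normalize(content):
    # Without the guard, None.strip() raises AttributeError
    return content.strip() if content else ""


print(repr(normalize("  hello  ")))  # 'hello'
print(repr(normalize(None)))         # ''
print(repr(normalize("")))           # ''
```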
@@ -858,102 +858,6 @@ class LLM:
                         iteration_count += 1
                         continue

-                    # Special handling for Ollama models that don't automatically process tool results
-                    ollama_handled = False
-                    ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-                    if ollama_params:
-                        # Get response based on streaming mode
-                        if stream:
-                            # Streaming approach
-                            if verbose:
-                                with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                                    response_text = ""
-                                    for chunk in litellm.completion(
-                                        **self._build_completion_params(
-                                            messages=ollama_params["follow_up_messages"],
-                                            temperature=temperature,
-                                            stream=True
-                                        )
-                                    ):
-                                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                            content = chunk.choices[0].delta.content
-                                            response_text += content
-                                            live.update(display_generating(response_text, start_time))
-                            else:
-                                response_text = ""
-                                for chunk in litellm.completion(
-                                    **self._build_completion_params(
-                                        messages=ollama_params["follow_up_messages"],
-                                        temperature=temperature,
-                                        stream=True
-                                    )
-                                ):
-                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                        response_text += chunk.choices[0].delta.content
-                        else:
-                            # Non-streaming approach
-                            resp = litellm.completion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=False
-                                )
-                            )
-                            response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
-
-                        # Set flag to indicate Ollama was handled
-                        ollama_handled = True
-                        final_response_text = response_text.strip()
-                        logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                        # Display the response if we got one
-                        if final_response_text and verbose:
-                            display_interaction(
-                                ollama_params["original_prompt"],
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-
-                        # Update messages and continue the loop instead of returning
-                        if final_response_text:
-                            # Update messages with the response to maintain conversation context
-                            messages.append({
-                                "role": "assistant",
-                                "content": final_response_text
-                            })
-                            # Continue the loop to check if more tools are needed
-                            iteration_count += 1
-                            continue
-                        else:
-                            logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-                    # Handle reasoning_steps after tool execution if not already handled by Ollama
-                    if reasoning_steps and not ollama_handled:
-                        # Make a non-streaming call to capture reasoning content
-                        reasoning_resp = litellm.completion(
-                            **self._build_completion_params(
-                                messages=messages,
-                                temperature=temperature,
-                                stream=False,  # force non-streaming
-                                **{k:v for k,v in kwargs.items() if k != 'reasoning_steps'}
-                            )
-                        )
-                        reasoning_content = reasoning_resp["choices"][0]["message"].get("provider_specific_fields", {}).get("reasoning_content")
-                        response_text = reasoning_resp["choices"][0]["message"]["content"]
-
-                        # Store reasoning content for later use
-                        if reasoning_content:
-                            stored_reasoning_content = reasoning_content
-
-                        # Update messages with the response
-                        messages.append({
-                            "role": "assistant",
-                            "content": response_text
-                        })
-
                     # After tool execution, continue the loop to check if more tools are needed
                     # instead of immediately trying to get a final response
                     iteration_count += 1
@@ -962,7 +866,7 @@ class LLM:
                     # No tool calls, we're done with this iteration
                     # If we've executed tools in previous iterations, this response contains the final answer
                     if iteration_count > 0:
-                        final_response_text = response_text.strip()
+                        final_response_text = response_text.strip() if response_text else ""
                     break

             except Exception as e:
@@ -993,7 +897,7 @@ class LLM:
                         console=console
                     )

-            response_text = response_text.strip()
+            response_text = response_text.strip() if response_text else ""

            # Return reasoning content if reasoning_steps is True and we have it
            if reasoning_steps and stored_reasoning_content:
@@ -1155,7 +1059,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                                     response_text += chunk.choices[0].delta.content

-                        response_text = response_text.strip()
+                        response_text = response_text.strip() if response_text else ""
                         continue

                     except json.JSONDecodeError:
@@ -1367,7 +1271,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                         if formatted_tools and self._supports_streaming_tools():
                             tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

-                response_text = response_text.strip()
+                response_text = response_text.strip() if response_text else ""

                 # We already have tool_calls from streaming if supported
                 # No need for a second API call!
@@ -1430,68 +1334,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     # Get response after tool calls
                     response_text = ""

-                    #
-
-                    ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
-
-                    if ollama_params:
-                        # Get response with streaming
-                        if verbose:
-                            response_text = ""
-                            async for chunk in await litellm.acompletion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    content = chunk.choices[0].delta.content
-                                    response_text += content
-                                    print("\033[K", end="\r")
-                                    print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
-                        else:
-                            response_text = ""
-                            async for chunk in await litellm.acompletion(
-                                **self._build_completion_params(
-                                    messages=ollama_params["follow_up_messages"],
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-
-                        # Set flag to indicate Ollama was handled
-                        ollama_handled = True
-                        final_response_text = response_text.strip()
-                        logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                        # Display the response if we got one
-                        if final_response_text and verbose:
-                            display_interaction(
-                                ollama_params["original_prompt"],
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-
-                        # Store the response for potential final return
-                        if final_response_text:
-                            # Update messages with the response to maintain conversation context
-                            messages.append({
-                                "role": "assistant",
-                                "content": final_response_text
-                            })
-                            # Continue the loop to check if more tools are needed
-                            iteration_count += 1
-                            continue
-                        else:
-                            logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-
-                    # If no special handling was needed or if it's not an Ollama model
-                    if reasoning_steps and not ollama_handled:
+                    # If no special handling was needed
+                    if reasoning_steps:
                         # Non-streaming call to capture reasoning
                         resp = await litellm.acompletion(
                             **self._build_completion_params(
@@ -1521,7 +1365,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 generation_time=time.time() - start_time,
                                 console=console
                             )
-
+                    else:
                         # Get response after tool calls with streaming if not already handled
                         if verbose:
                             async for chunk in await litellm.acompletion(
@@ -1551,7 +1395,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                                 if chunk and chunk.choices and chunk.choices[0].delta.content:
                                     response_text += chunk.choices[0].delta.content

-                    response_text = response_text.strip()
+                    response_text = response_text.strip() if response_text else ""

                     # After tool execution, update messages and continue the loop
                     if response_text:
@@ -1790,67 +1634,6 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         litellm.callbacks = events

-    def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
-        """
-        Handle special Ollama model requirements when processing tool results.
-
-        Args:
-            response_text: The initial response text from the model
-            tool_results: List of tool execution results
-            messages: The conversation messages list
-            original_prompt: The original user prompt
-
-        Returns:
-            Dict with follow-up parameters if Ollama needs special handling, None otherwise
-        """
-        if not self._is_ollama_provider() or not tool_results:
-            return None
-
-        # Check if the response is just a JSON tool call
-        try:
-            json_response = json.loads(response_text.strip())
-            if not (('name' in json_response or 'function' in json_response) and
-                    not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
-                return None
-
-            logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
-
-            # Extract the original user query from messages
-            original_query = ""
-            for msg in reversed(messages):  # Look from the end to find the most recent user message
-                if msg.get("role") == "user":
-                    content = msg.get("content", "")
-                    # Handle list content (multimodal)
-                    if isinstance(content, list):
-                        for item in content:
-                            if isinstance(item, dict) and item.get("type") == "text":
-                                original_query = item.get("text", "")
-                                break
-                    else:
-                        original_query = content
-                    if original_query:
-                        break
-
-            # Create a shorter follow-up prompt with all tool results
-            # If there's only one result, use it directly; otherwise combine them
-            if len(tool_results) == 1:
-                results_text = json.dumps(tool_results[0], indent=2)
-            else:
-                results_text = json.dumps(tool_results, indent=2)
-
-            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
-            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
-            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
-            # Return parameters for follow-up call
-            return {
-                "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
-                "original_prompt": original_prompt
-            }
-
-        except (json.JSONDecodeError, KeyError):
-            # Not a JSON response or not a tool call format
-            return None

     def _build_completion_params(self, **override_params) -> Dict[str, Any]:
         """Build parameters for litellm completion calls with all necessary config"""
@@ -2066,7 +1849,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             response_text += content
             else:
                 response = litellm.completion(**completion_params)
-                response_text = response.choices[0].message.content.strip()
+                response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""

             if verbose:
                 display_interaction(
@@ -2077,7 +1860,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     console=console or self.console
                 )

-            return response_text.strip()
+            return response_text.strip() if response_text else ""

         except Exception as error:
             display_error(f"Error in response: {str(error)}")
@@ -2154,7 +1937,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                             response_text += content
             else:
                 response = await litellm.acompletion(**completion_params)
-                response_text = response.choices[0].message.content.strip()
+                response_text = response.choices[0].message.content.strip() if response.choices[0].message.content else ""

             if verbose:
                 display_interaction(
@@ -2165,7 +1948,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     console=console or self.console
                 )

-            return response_text.strip()
+            return response_text.strip() if response_text else ""

         except Exception as error:
             display_error(f"Error in response_async: {str(error)}")
praisonaiagents/llm/openai_client.py

@@ -890,45 +890,8 @@ class OpenAIClient:
                         "content": results_str
                     })

-                #
-
-                for tool_call in tool_calls:
-                    # Handle both ToolCall dataclass and OpenAI object
-                    if isinstance(tool_call, ToolCall):
-                        function_name = tool_call.function["name"]
-                        arguments = json.loads(tool_call.function["arguments"])
-                    else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-
-                    # For sequential thinking tool, check if nextThoughtNeeded is True
-                    if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                        should_continue = True
-                        break
-
-                if not should_continue:
-                    # Get final response after tool calls
-                    if stream:
-                        final_response = self.process_stream_response(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            tools=formatted_tools,
-                            start_time=start_time,
-                            console=console,
-                            display_fn=display_fn,
-                            reasoning_steps=reasoning_steps,
-                            **kwargs
-                        )
-                    else:
-                        final_response = self.create_completion(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            stream=False,
-                            **kwargs
-                        )
-                    break
+                # Continue the loop to allow more tool calls
+                # The model will see tool results and can make additional tool calls

                 iteration_count += 1
             else:
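Instead of breaking out of the loop and issuing a dedicated final completion after the first round of tool results, the client now appends the tool results to the conversation and iterates again, letting the next completion decide whether to call more tools or produce the final answer. A rough sketch of that loop shape, using the plain openai SDK rather than the package's OpenAIClient; the model name and the get_weather tool are placeholders for this example only.

```python
import json
from openai import OpenAI  # assumes the standard openai SDK is installed and configured

client = OpenAI()


def get_weather(city: str) -> str:
    # Hypothetical local tool used only for this sketch
    return json.dumps({"city": city, "temp_c": 21})


TOOLS = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]


def run_tool_loop(prompt: str, max_iterations: int = 10) -> str:
    messages = [{"role": "user", "content": prompt}]
    for _ in range(max_iterations):
        response = client.chat.completions.create(
            model="gpt-4o-mini", messages=messages, tools=TOOLS
        )
        message = response.choices[0].message
        if not message.tool_calls:
            # No more tool calls: this response is the final answer
            return message.content or ""
        # Append the assistant turn and every tool result, then loop again
        messages.append(message)
        for call in message.tool_calls:
            args = json.loads(call.function.arguments)
            result = get_weather(**args)
            messages.append({
                "role": "tool",
                "tool_call_id": call.id,
                "content": result,
            })
    return ""
```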
@@ -1067,45 +1030,8 @@ class OpenAIClient:
                         "content": results_str
                     })

-                #
-
-                for tool_call in tool_calls:
-                    # Handle both ToolCall dataclass and OpenAI object
-                    if isinstance(tool_call, ToolCall):
-                        function_name = tool_call.function["name"]
-                        arguments = json.loads(tool_call.function["arguments"])
-                    else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-
-                    # For sequential thinking tool, check if nextThoughtNeeded is True
-                    if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
-                        should_continue = True
-                        break
-
-                if not should_continue:
-                    # Get final response after tool calls
-                    if stream:
-                        final_response = await self.process_stream_response_async(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            tools=formatted_tools,
-                            start_time=start_time,
-                            console=console,
-                            display_fn=display_fn,
-                            reasoning_steps=reasoning_steps,
-                            **kwargs
-                        )
-                    else:
-                        final_response = await self.acreate_completion(
-                            messages=messages,
-                            model=model,
-                            temperature=temperature,
-                            stream=False,
-                            **kwargs
-                        )
-                    break
+                # Continue the loop to allow more tool calls
+                # The model will see tool results and can make additional tool calls

                 iteration_count += 1
             else:
praisonaiagents/process/process.py

@@ -469,16 +469,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")

                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())
-
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")

                 # Handle loop progression
@@ -1099,16 +1101,18 @@ Subtask: {st.name}
                 logging.debug(f"Task type: {task_to_check.task_type}")
                 logging.debug(f"Task status before reset check: {task_to_check.status}")
                 logging.debug(f"Task rerun: {getattr(task_to_check, 'rerun', True)}") # default to True if not set
+                logging.debug(f"Task async_execution: {task_to_check.async_execution}")

                 if (getattr(task_to_check, 'rerun', True) and # Corrected condition - reset only if rerun is True (or default True)
                     task_to_check.task_type != "loop" and # Removed "decision" from exclusion
                     not any(t.task_type == "loop" and subtask_name.startswith(t.name + "_")
-                            for t in self.tasks.values())
-
+                            for t in self.tasks.values()) and
+                    not task_to_check.async_execution): # Don't reset async parallel tasks
+                    logging.debug(f"=== Resetting non-loop, non-decision, non-parallel task {subtask_name} to 'not started' ===")
                     self.tasks[task_id].status = "not started"
                     logging.debug(f"Task status after reset: {self.tasks[task_id].status}")
                 else:
-                    logging.debug(f"=== Skipping reset for loop/decision/subtask or rerun=False: {subtask_name} ===")
+                    logging.debug(f"=== Skipping reset for loop/decision/subtask/parallel or rerun=False: {subtask_name} ===")
                     logging.debug(f"Keeping status as: {self.tasks[task_id].status}")

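Both the synchronous and asynchronous process paths now also check `async_execution` before resetting a completed subtask, so tasks that ran in parallel are not pushed back to "not started". A reduced sketch of the reset decision, with a simplified Task stand-in rather than the package's Task class:

```python
from dataclasses import dataclass


@dataclass
class Task:
    name: str
    task_type: str = "task"
    rerun: bool = True
    async_execution: bool = False
    status: str = "completed"


def should_reset(task: Task, subtask_name: str, all_tasks: dict) -> bool:
    # Simplified mirror of the condition in process.py: reset only if the task
    # may rerun, is not a loop task, is not a subtask of a loop task, and is
    # not an async parallel task (the new check added in 0.0.128).
    return (
        getattr(task, "rerun", True)
        and task.task_type != "loop"
        and not any(
            t.task_type == "loop" and subtask_name.startswith(t.name + "_")
            for t in all_tasks.values()
        )
        and not task.async_execution
    )


tasks = {1: Task("fetch", async_execution=True), 2: Task("summarize")}
print(should_reset(tasks[1], "fetch", tasks))      # False: async parallel task keeps its status
print(should_reset(tasks[2], "summarize", tasks))  # True: normal sync task gets reset
```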