praisonaiagents 0.0.117__tar.gz → 0.0.118__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/PKG-INFO +1 -1
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/llm/llm.py +161 -185
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/PKG-INFO +1 -1
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/pyproject.toml +1 -1
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/README.md +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agent/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agent/agent.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agent/handoff.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agent/image_agent.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agents/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agents/agents.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/agents/autoagents.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/approval.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/guardrail_result.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/chunking.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/knowledge.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/llm/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/llm/openai_client.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/main.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/mcp/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/mcp/mcp.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/mcp/mcp_sse.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/memory/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/memory/memory.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/process/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/process/process.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/session.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/task/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/task/task.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/integration.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/telemetry.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/README.md +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/__init__.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/arxiv_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/calculator_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/csv_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/duckdb_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/excel_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/file_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/json_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/newspaper_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/pandas_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/python_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/searxng_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/shell_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/spider_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/test.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/train/data/generatecot.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/wikipedia_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/xml_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/yaml_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents/tools/yfinance_tools.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/SOURCES.txt +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/dependency_links.txt +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/requires.txt +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/top_level.txt +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/setup.cfg +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test-graph-memory.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test_handoff_compatibility.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test_ollama_async_fix.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test_ollama_fix.py +0 -0
- {praisonaiagents-0.0.117 → praisonaiagents-0.0.118}/tests/test_posthog_fixed.py +0 -0
Aside from the version metadata (PKG-INFO, pyproject.toml), the change in this release is confined to `praisonaiagents/llm/llm.py` (+161 -185): the inline Ollama tool-result handling is extracted into a shared `_handle_ollama_model` helper, used by both the synchronous and asynchronous response paths.

```diff
@@ -813,99 +813,56 @@ class LLM:
             # Make one more call to get the final summary response
             # Special handling for Ollama models that don't automatically process tool results
             ollama_handled = False
-            ... (removed lines 816-827: content not captured in the extracted diff)
-                        original_query = ""
-                        for msg in reversed(messages):  # Look from the end to find the most recent user message
-                            if msg.get("role") == "user":
-                                content = msg.get("content", "")
-                                # Handle list content (multimodal)
-                                if isinstance(content, list):
-                                    for item in content:
-                                        if isinstance(item, dict) and item.get("type") == "text":
-                                            original_query = item.get("text", "")
-                                            break
-                                else:
-                                    original_query = content
-                                if original_query:
-                                    break
-
-                        # Create a shorter follow-up prompt with all tool results
-                        # If there's only one result, use it directly; otherwise combine them
-                        if len(tool_results) == 1:
-                            results_text = json.dumps(tool_results[0], indent=2)
-                        else:
-                            results_text = json.dumps(tool_results, indent=2)
-
-                        follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
-                        logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
-                        logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
-                        # Make a follow-up call to process the results
-                        follow_up_messages = [
-                            {"role": "user", "content": follow_up_prompt}
-                        ]
-
-                        # Get response with streaming
-                        if verbose:
-                            with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                                response_text = ""
-                                for chunk in litellm.completion(
-                                    **self._build_completion_params(
-                                        messages=follow_up_messages,
-                                        temperature=temperature,
-                                        stream=stream
-                                    )
-                                ):
-                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                        content = chunk.choices[0].delta.content
-                                        response_text += content
-                                        live.update(display_generating(response_text, start_time))
-                        else:
-                            response_text = ""
-                            for chunk in litellm.completion(
-                                **self._build_completion_params(
-                                    messages=follow_up_messages,
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-
-                        # Set flag to indicate Ollama was handled
-                        ollama_handled = True
-                        final_response_text = response_text.strip()
-                        logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                        # Display the response if we got one
-                        if final_response_text and verbose:
-                            display_interaction(
-                                original_prompt,
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
+            ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+            if ollama_params:
+                # Get response with streaming
+                if verbose:
+                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            **self._build_completion_params(
+                                messages=ollama_params["follow_up_messages"],
+                                temperature=temperature,
+                                stream=stream
                             )
-            ... (removed lines 900-908: content not captured in the extracted diff)
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    response_text = ""
+                    for chunk in litellm.completion(
+                        **self._build_completion_params(
+                            messages=ollama_params["follow_up_messages"],
+                            temperature=temperature,
+                            stream=stream
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+
+                # Set flag to indicate Ollama was handled
+                ollama_handled = True
+                final_response_text = response_text.strip()
+                logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+                # Display the response if we got one
+                if final_response_text and verbose:
+                    display_interaction(
+                        ollama_params["original_prompt"],
+                        final_response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+
+                # Return the final response after processing Ollama's follow-up
+                if final_response_text:
+                    return final_response_text
+                else:
+                    logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

             # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
             if reasoning_steps and not ollama_handled:
```
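For context on what the refactored branch above does at runtime, the sketch below isolates the synchronous follow-up call in plain litellm terms. It is illustrative only: the model name, prompt text, and temperature are placeholders rather than values taken from the package, which builds its parameters through `self._build_completion_params`.

```python
import litellm

# Minimal sketch of the synchronous follow-up step, assuming a locally served
# Ollama model; "ollama/llama3" and the prompt text are placeholders.
follow_up_messages = [{
    "role": "user",
    "content": "Results:\n{\"temperature_c\": 21}\nProvide Answer to this Original Question based on the above results: 'What is the weather in Paris?'",
}]

response_text = ""
for chunk in litellm.completion(
    model="ollama/llama3",
    messages=follow_up_messages,
    temperature=0.7,
    stream=True,
):
    # Streaming chunks carry incremental text in choices[0].delta.content.
    if chunk and chunk.choices and chunk.choices[0].delta.content:
        response_text += chunk.choices[0].delta.content

final_response_text = response_text.strip()
```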
```diff
@@ -1480,99 +1437,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

             # Special handling for Ollama models that don't automatically process tool results
             ollama_handled = False
-            ... (removed lines 1483-1532: content not captured in the extracted diff)
-                                        stream=stream
-                                    )
-                                ):
-                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                        content = chunk.choices[0].delta.content
-                                        response_text += content
-                                        print("\033[K", end="\r")
-                                        print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
-                        else:
-                            response_text = ""
-                            async for chunk in await litellm.acompletion(
-                                **self._build_completion_params(
-                                    messages=follow_up_messages,
-                                    temperature=temperature,
-                                    stream=stream
-                                )
-                            ):
-                                if chunk and chunk.choices and chunk.choices[0].delta.content:
-                                    response_text += chunk.choices[0].delta.content
-
-                        # Set flag to indicate Ollama was handled
-                        ollama_handled = True
-                        final_response_text = response_text.strip()
-                        logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-                        # Display the response if we got one
-                        if final_response_text and verbose:
-                            display_interaction(
-                                original_prompt,
-                                final_response_text,
-                                markdown=markdown,
-                                generation_time=time.time() - start_time,
-                                console=console
-                            )
-
-                        # Return the final response after processing Ollama's follow-up
-                        if final_response_text:
-                            return final_response_text
-                        else:
-                            logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-                except (json.JSONDecodeError, KeyError):
-                    # Not a JSON response or not a tool call format, continue normally
-                    pass
+            ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+            if ollama_params:
+                # Get response with streaming
+                if verbose:
+                    response_text = ""
+                    async for chunk in await litellm.acompletion(
+                        **self._build_completion_params(
+                            messages=ollama_params["follow_up_messages"],
+                            temperature=temperature,
+                            stream=stream
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            content = chunk.choices[0].delta.content
+                            response_text += content
+                            print("\033[K", end="\r")
+                            print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+                else:
+                    response_text = ""
+                    async for chunk in await litellm.acompletion(
+                        **self._build_completion_params(
+                            messages=ollama_params["follow_up_messages"],
+                            temperature=temperature,
+                            stream=stream
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+
+                # Set flag to indicate Ollama was handled
+                ollama_handled = True
+                final_response_text = response_text.strip()
+                logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+                # Display the response if we got one
+                if final_response_text and verbose:
+                    display_interaction(
+                        ollama_params["original_prompt"],
+                        final_response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+
+                # Return the final response after processing Ollama's follow-up
+                if final_response_text:
+                    return final_response_text
+                else:
+                    logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

             # If no special handling was needed or if it's not an Ollama model
             if reasoning_steps and not ollama_handled:
```
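The asynchronous hunk above mirrors the synchronous one, but consumes the stream from `litellm.acompletion`. A minimal sketch of that consumption pattern follows; the model name and prompt are again placeholders rather than the package's own parameters.

```python
import asyncio

import litellm


async def run_follow_up(follow_up_messages: list[dict]) -> str:
    # With stream=True, awaiting acompletion yields an async-iterable stream of chunks.
    response_text = ""
    async for chunk in await litellm.acompletion(
        model="ollama/llama3",  # placeholder Ollama model
        messages=follow_up_messages,
        temperature=0.7,
        stream=True,
    ):
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            response_text += chunk.choices[0].delta.content
    return response_text.strip()


# Example invocation with a placeholder follow-up prompt:
# asyncio.run(run_follow_up([{"role": "user", "content": "Results:\n...\nProvide Answer ..."}]))
```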
```diff
@@ -1839,6 +1753,68 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         litellm.callbacks = events

+    def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
+        """
+        Handle special Ollama model requirements when processing tool results.
+
+        Args:
+            response_text: The initial response text from the model
+            tool_results: List of tool execution results
+            messages: The conversation messages list
+            original_prompt: The original user prompt
+
+        Returns:
+            Dict with follow-up parameters if Ollama needs special handling, None otherwise
+        """
+        if not self._is_ollama_provider() or not tool_results:
+            return None
+
+        # Check if the response is just a JSON tool call
+        try:
+            json_response = json.loads(response_text.strip())
+            if not (('name' in json_response or 'function' in json_response) and
+                    not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
+                return None
+
+            logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
+
+            # Extract the original user query from messages
+            original_query = ""
+            for msg in reversed(messages):  # Look from the end to find the most recent user message
+                if msg.get("role") == "user":
+                    content = msg.get("content", "")
+                    # Handle list content (multimodal)
+                    if isinstance(content, list):
+                        for item in content:
+                            if isinstance(item, dict) and item.get("type") == "text":
+                                original_query = item.get("text", "")
+                                break
+                    else:
+                        original_query = content
+                    if original_query:
+                        break
+
+            # Create a shorter follow-up prompt with all tool results
+            # If there's only one result, use it directly; otherwise combine them
+            if len(tool_results) == 1:
+                results_text = json.dumps(tool_results[0], indent=2)
+            else:
+                results_text = json.dumps(tool_results, indent=2)
+
+            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
+
+            # Return parameters for follow-up call
+            return {
+                "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
+                "original_prompt": original_prompt
+            }
+
+        except (json.JSONDecodeError, KeyError):
+            # Not a JSON response or not a tool call format
+            return None
+
     def _build_completion_params(self, **override_params) -> Dict[str, Any]:
         """Build parameters for litellm completion calls with all necessary config"""
         params = {
```
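To make the new helper's contract easier to read out of context, here is a hypothetical call against it. The `llm` instance, tool result, and question are invented for illustration; the real call sites are the synchronous and asynchronous hunks above.

```python
# Hypothetical illustration of _handle_ollama_model's return contract.
# Assumes `llm` is an LLM instance whose provider is Ollama.
response_text = '{"name": "get_weather", "arguments": {"city": "Paris"}}'  # model echoed only a tool call
tool_results = [{"temperature_c": 21, "condition": "sunny"}]
messages = [{"role": "user", "content": "What is the weather in Paris?"}]

ollama_params = llm._handle_ollama_model(response_text, tool_results, messages, messages[0]["content"])

if ollama_params:
    # A single-user-message list embedding the JSON-dumped tool results plus the
    # original question, ready to be sent back through litellm.
    follow_up_messages = ollama_params["follow_up_messages"]
    # The original prompt is passed through unchanged, for display purposes.
    original_prompt = ollama_params["original_prompt"]
else:
    # Non-Ollama providers, empty tool results, or a response that already reads
    # like an answer all return None and fall through to the normal path.
    pass
```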