praisonaiagents 0.0.116__py3-none-any.whl → 0.0.118__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/agent/agent.py +109 -10
- praisonaiagents/agents/agents.py +1 -1
- praisonaiagents/agents/autoagents.py +13 -1
- praisonaiagents/llm/llm.py +161 -185
- praisonaiagents/main.py +0 -19
- praisonaiagents/process/process.py +4 -2
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/RECORD +10 -10
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/top_level.txt +0 -0
praisonaiagents/agent/agent.py
CHANGED
@@ -354,8 +354,10 @@ class Agent:
         # Check for model name in environment variable if not provided
         self._using_custom_llm = False

-        #
-        self.
+        # Store OpenAI client parameters for lazy initialization
+        self._openai_api_key = api_key
+        self._openai_base_url = base_url
+        self.__openai_client = None

         # If base_url is provided, always create a custom LLM instance
         if base_url:
@@ -488,6 +490,42 @@ Your Goal: {self.goal}
             for source in knowledge:
                 self._process_knowledge(source)

+    @property
+    def _openai_client(self):
+        """Lazily initialize OpenAI client only when needed."""
+        if self.__openai_client is None:
+            try:
+                self.__openai_client = get_openai_client(
+                    api_key=self._openai_api_key,
+                    base_url=self._openai_base_url
+                )
+            except ValueError as e:
+                # If we're using a custom LLM, we might not need the OpenAI client
+                # Return None and let the calling code handle it
+                if self._using_custom_llm:
+                    return None
+                else:
+                    raise e
+        return self.__openai_client
+
+    @property
+    def llm_model(self):
+        """Unified property to get the LLM model regardless of configuration type.
+
+        Returns:
+            The LLM model/instance being used by this agent.
+            - For standard models: returns the model string (e.g., "gpt-4o")
+            - For custom LLM instances: returns the LLM instance object
+            - For provider models: returns the LLM instance object
+        """
+        if hasattr(self, 'llm_instance') and self.llm_instance:
+            return self.llm_instance
+        elif hasattr(self, 'llm') and self.llm:
+            return self.llm
+        else:
+            # Default fallback
+            return "gpt-4o"
+
     def _process_knowledge(self, knowledge_item):
         """Process and store knowledge from a file path, URL, or string."""
         try:
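The hunk above replaces an eagerly created OpenAI client with stored constructor parameters plus a lazily evaluated _openai_client property. A minimal, self-contained sketch of that pattern follows; make_client, LazyAgent, and the placeholder error message are illustrative stand-ins, not the package's actual get_openai_client or Agent API.

import os


def make_client(api_key=None, base_url=None):
    """Illustrative stand-in for a client factory that needs credentials."""
    key = api_key or os.environ.get("OPENAI_API_KEY")
    if not key:
        raise ValueError("OPENAI_API_KEY is required")
    return {"api_key": key, "base_url": base_url}  # pretend client object


class LazyAgent:
    def __init__(self, api_key=None, base_url=None, using_custom_llm=False):
        # Store parameters only; defer client creation until first use.
        self._api_key = api_key
        self._base_url = base_url
        self._using_custom_llm = using_custom_llm
        self.__client = None  # name-mangled cache, like Agent.__openai_client

    @property
    def _client(self):
        """Create the client on first access and cache it."""
        if self.__client is None:
            try:
                self.__client = make_client(self._api_key, self._base_url)
            except ValueError:
                # Custom LLM providers may never need this client.
                if self._using_custom_llm:
                    return None
                raise
        return self.__client


agent = LazyAgent(using_custom_llm=True)
print(agent._client)  # None when no key is set and a custom LLM is in use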
@@ -694,14 +732,39 @@ Your Role: {self.role}\n
 Your Goal: {self.goal}
 """

-        # Use openai_client's build_messages method
- [7 removed lines not rendered in the source diff]
+        # Use openai_client's build_messages method if available
+        if self._openai_client is not None:
+            messages, original_prompt = self._openai_client.build_messages(
+                prompt=prompt,
+                system_prompt=system_prompt,
+                chat_history=self.chat_history,
+                output_json=output_json,
+                output_pydantic=output_pydantic
+            )
+        else:
+            # Fallback implementation for when OpenAI client is not available
+            messages = []
+
+            # Add system message if provided
+            if system_prompt:
+                messages.append({"role": "system", "content": system_prompt})
+
+            # Add chat history
+            messages.extend(self.chat_history)
+
+            # Add user prompt
+            if isinstance(prompt, list):
+                messages.extend(prompt)
+                original_prompt = prompt
+            else:
+                messages.append({"role": "user", "content": str(prompt)})
+                original_prompt = str(prompt)
+
+            # Add JSON format instruction if needed
+            if output_json or output_pydantic:
+                model = output_pydantic or output_json
+                json_instruction = f"\nPlease respond with valid JSON matching this schema: {model.model_json_schema()}"
+                messages[-1]["content"] += json_instruction

         return messages, original_prompt

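When no OpenAI client is available, the fallback branch above builds the message list by hand and appends a JSON-schema instruction derived from the requested output model. A small runnable sketch of that last step, using a throwaway Pydantic model and message purely for illustration:

from pydantic import BaseModel


class Answer(BaseModel):
    """Throwaway model used only to demonstrate the schema instruction."""
    title: str
    score: float


messages = [{"role": "user", "content": "Summarise the report"}]

# Mirror the fallback path: append the schema of the requested output model
# to the most recent user message.
schema = Answer.model_json_schema()
messages[-1]["content"] += (
    f"\nPlease respond with valid JSON matching this schema: {schema}"
)

print(messages[-1]["content"])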
@@ -943,6 +1006,9 @@ Your Goal: {self.goal}

     def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
         """Process streaming response and return final response"""
+        if self._openai_client is None:
+            raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
         return self._openai_client.process_stream_response(
             messages=messages,
             model=self.llm,
@@ -1009,6 +1075,9 @@ Your Goal: {self.goal}

         # Note: openai_client expects tools in various formats and will format them internally
         # But since we already have formatted_tools, we can pass them directly
+        if self._openai_client is None:
+            raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
         final_response = self._openai_client.chat_completion_with_tools(
             messages=messages,
             model=self.llm,
@@ -1202,6 +1271,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             messages.append({"role": "user", "content": reflection_prompt})

             try:
+                # Check if OpenAI client is available
+                if self._openai_client is None:
+                    # For custom LLMs, self-reflection with structured output is not supported
+                    if self.verbose:
+                        display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+                    # Return the original response without reflection
+                    self.chat_history.append({"role": "user", "content": prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+                    return response_text
+
                 reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=messages,
@@ -1388,6 +1468,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         # Use the new _format_tools_for_completion helper method
         formatted_tools = self._format_tools_for_completion(tools)
+
+        # Check if OpenAI client is available
+        if self._openai_client is None:
+            error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
+            display_error(error_msg)
+            return None

         # Make the API call based on the type of request
         if tools:
@@ -1442,6 +1528,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             ]

             try:
+                # Check if OpenAI client is available for self-reflection
+                if self._openai_client is None:
+                    # For custom LLMs, self-reflection with structured output is not supported
+                    if self.verbose:
+                        display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+                    # Return the original response without reflection
+                    self.chat_history.append({"role": "user", "content": original_prompt})
+                    self.chat_history.append({"role": "assistant", "content": response_text})
+                    if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+                        total_time = time.time() - start_time
+                        logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+                    return response_text
+
                 reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
                     model=self.reflect_llm if self.reflect_llm else self.llm,
                     messages=reflection_messages,
praisonaiagents/agents/agents.py
CHANGED
@@ -7,7 +7,7 @@ from pydantic import BaseModel
 from rich.text import Text
 from rich.panel import Panel
 from rich.console import Console
-from ..main import display_error, TaskOutput, error_logs
+from ..main import display_error, TaskOutput, error_logs
 from ..agent.agent import Agent
 from ..task.task import Task
 from ..process.process import Process, LoopItems
praisonaiagents/agents/autoagents.py
CHANGED
@@ -12,7 +12,8 @@ from typing import List, Any, Optional, Dict, Tuple
 import logging
 import os
 from pydantic import BaseModel, ConfigDict
-from ..main import display_instruction, display_tool_call, display_interaction
+from ..main import display_instruction, display_tool_call, display_interaction
+from ..llm import get_openai_client

 # Define Pydantic models for structured output
 class TaskConfig(BaseModel):
@@ -237,6 +238,17 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
 """

         try:
+            # Get OpenAI client
+            try:
+                client = get_openai_client()
+            except ValueError as e:
+                # AutoAgents requires OpenAI for structured output generation
+                raise ValueError(
+                    "AutoAgents requires OpenAI API for automatic agent generation. "
+                    "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
+                    "with manually configured agents for non-OpenAI providers."
+                ) from e
+
             response = client.beta.chat.completions.parse(
                 model=self.llm,
                 response_format=AutoAgentsConfig,
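The change above wraps the client lookup so that a missing key surfaces as an AutoAgents-specific error while the original exception is preserved via raise ... from e. A generic sketch of this error-chaining pattern, with placeholder function names that are not part of the package:

def get_client():
    """Placeholder for a factory that raises when credentials are missing."""
    raise ValueError("OPENAI_API_KEY environment variable is required")


def build_agents_config():
    try:
        client = get_client()
    except ValueError as e:
        # Re-raise with guidance specific to this feature, preserving the
        # original exception as __cause__ for debugging.
        raise ValueError(
            "Automatic agent generation needs an OpenAI-compatible client. "
            "Set OPENAI_API_KEY or configure agents manually."
        ) from e
    return client


try:
    build_agents_config()
except ValueError as e:
    print(e)
    print("caused by:", e.__cause__)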
praisonaiagents/llm/llm.py
CHANGED
@@ -813,99 +813,56 @@ class LLM:
             # Make one more call to get the final summary response
             # Special handling for Ollama models that don't automatically process tool results
             ollama_handled = False
- [12 removed lines not rendered in the source diff]
-            original_query = ""
-            for msg in reversed(messages):  # Look from the end to find the most recent user message
-                if msg.get("role") == "user":
-                    content = msg.get("content", "")
-                    # Handle list content (multimodal)
-                    if isinstance(content, list):
-                        for item in content:
-                            if isinstance(item, dict) and item.get("type") == "text":
-                                original_query = item.get("text", "")
-                                break
-                    else:
-                        original_query = content
-                    if original_query:
-                        break
-
-            # Create a shorter follow-up prompt with all tool results
-            # If there's only one result, use it directly; otherwise combine them
-            if len(tool_results) == 1:
-                results_text = json.dumps(tool_results[0], indent=2)
-            else:
-                results_text = json.dumps(tool_results, indent=2)
-
-            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
-            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
-            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
-            # Make a follow-up call to process the results
-            follow_up_messages = [
-                {"role": "user", "content": follow_up_prompt}
-            ]
-
-            # Get response with streaming
-            if verbose:
-                with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
-                    response_text = ""
-                    for chunk in litellm.completion(
-                        **self._build_completion_params(
-                            messages=follow_up_messages,
-                            temperature=temperature,
-                            stream=stream
-                        )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
-                            response_text += content
-                            live.update(display_generating(response_text, start_time))
-            else:
-                response_text = ""
-                for chunk in litellm.completion(
-                    **self._build_completion_params(
-                        messages=follow_up_messages,
-                        temperature=temperature,
-                        stream=stream
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
-
-            # Set flag to indicate Ollama was handled
-            ollama_handled = True
-            final_response_text = response_text.strip()
-            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-            # Display the response if we got one
-            if final_response_text and verbose:
-                display_interaction(
-                    original_prompt,
-                    final_response_text,
-                    markdown=markdown,
-                    generation_time=time.time() - start_time,
-                    console=console
+            ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+            if ollama_params:
+                # Get response with streaming
+                if verbose:
+                    with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            **self._build_completion_params(
+                                messages=ollama_params["follow_up_messages"],
+                                temperature=temperature,
+                                stream=stream
                             )
- [9 removed lines not rendered in the source diff]
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                live.update(display_generating(response_text, start_time))
+                else:
+                    response_text = ""
+                    for chunk in litellm.completion(
+                        **self._build_completion_params(
+                            messages=ollama_params["follow_up_messages"],
+                            temperature=temperature,
+                            stream=stream
+                        )
+                    ):
+                        if chunk and chunk.choices and chunk.choices[0].delta.content:
+                            response_text += chunk.choices[0].delta.content
+
+                # Set flag to indicate Ollama was handled
+                ollama_handled = True
+                final_response_text = response_text.strip()
+                logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+                # Display the response if we got one
+                if final_response_text and verbose:
+                    display_interaction(
+                        ollama_params["original_prompt"],
+                        final_response_text,
+                        markdown=markdown,
+                        generation_time=time.time() - start_time,
+                        console=console
+                    )
+
+                # Return the final response after processing Ollama's follow-up
+                if final_response_text:
+                    return final_response_text
+                else:
+                    logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

             # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
             if reasoning_steps and not ollama_handled:
@@ -1480,99 +1437,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         # Special handling for Ollama models that don't automatically process tool results
         ollama_handled = False
- [50 removed lines not rendered in the source diff]
-                        stream=stream
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        content = chunk.choices[0].delta.content
-                        response_text += content
-                        print("\033[K", end="\r")
-                        print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
-            else:
-                response_text = ""
-                async for chunk in await litellm.acompletion(
-                    **self._build_completion_params(
-                        messages=follow_up_messages,
-                        temperature=temperature,
-                        stream=stream
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
-
-            # Set flag to indicate Ollama was handled
-            ollama_handled = True
-            final_response_text = response_text.strip()
-            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
-            # Display the response if we got one
-            if final_response_text and verbose:
-                display_interaction(
-                    original_prompt,
-                    final_response_text,
-                    markdown=markdown,
-                    generation_time=time.time() - start_time,
-                    console=console
-                )
-
-            # Return the final response after processing Ollama's follow-up
-            if final_response_text:
-                return final_response_text
-            else:
-                logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
-        except (json.JSONDecodeError, KeyError):
-            # Not a JSON response or not a tool call format, continue normally
-            pass
+        ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+        if ollama_params:
+            # Get response with streaming
+            if verbose:
+                response_text = ""
+                async for chunk in await litellm.acompletion(
+                    **self._build_completion_params(
+                        messages=ollama_params["follow_up_messages"],
+                        temperature=temperature,
+                        stream=stream
+                    )
+                ):
+                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                        content = chunk.choices[0].delta.content
+                        response_text += content
+                        print("\033[K", end="\r")
+                        print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+            else:
+                response_text = ""
+                async for chunk in await litellm.acompletion(
+                    **self._build_completion_params(
+                        messages=ollama_params["follow_up_messages"],
+                        temperature=temperature,
+                        stream=stream
+                    )
+                ):
+                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                        response_text += chunk.choices[0].delta.content
+
+            # Set flag to indicate Ollama was handled
+            ollama_handled = True
+            final_response_text = response_text.strip()
+            logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+            # Display the response if we got one
+            if final_response_text and verbose:
+                display_interaction(
+                    ollama_params["original_prompt"],
+                    final_response_text,
+                    markdown=markdown,
+                    generation_time=time.time() - start_time,
+                    console=console
+                )
+
+            # Return the final response after processing Ollama's follow-up
+            if final_response_text:
+                return final_response_text
+            else:
+                logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

         # If no special handling was needed or if it's not an Ollama model
         if reasoning_steps and not ollama_handled:
@@ -1839,6 +1753,68 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         litellm.callbacks = events

+    def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
+        """
+        Handle special Ollama model requirements when processing tool results.
+
+        Args:
+            response_text: The initial response text from the model
+            tool_results: List of tool execution results
+            messages: The conversation messages list
+            original_prompt: The original user prompt
+
+        Returns:
+            Dict with follow-up parameters if Ollama needs special handling, None otherwise
+        """
+        if not self._is_ollama_provider() or not tool_results:
+            return None
+
+        # Check if the response is just a JSON tool call
+        try:
+            json_response = json.loads(response_text.strip())
+            if not (('name' in json_response or 'function' in json_response) and
+                    not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
+                return None
+
+            logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
+
+            # Extract the original user query from messages
+            original_query = ""
+            for msg in reversed(messages):  # Look from the end to find the most recent user message
+                if msg.get("role") == "user":
+                    content = msg.get("content", "")
+                    # Handle list content (multimodal)
+                    if isinstance(content, list):
+                        for item in content:
+                            if isinstance(item, dict) and item.get("type") == "text":
+                                original_query = item.get("text", "")
+                                break
+                    else:
+                        original_query = content
+                    if original_query:
+                        break
+
+            # Create a shorter follow-up prompt with all tool results
+            # If there's only one result, use it directly; otherwise combine them
+            if len(tool_results) == 1:
+                results_text = json.dumps(tool_results[0], indent=2)
+            else:
+                results_text = json.dumps(tool_results, indent=2)
+
+            follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+            logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+            logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
+
+            # Return parameters for follow-up call
+            return {
+                "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
+                "original_prompt": original_prompt
+            }
+
+        except (json.JSONDecodeError, KeyError):
+            # Not a JSON response or not a tool call format
+            return None
+
     def _build_completion_params(self, **override_params) -> Dict[str, Any]:
         """Build parameters for litellm completion calls with all necessary config"""
         params = {
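Before issuing the follow-up call, _handle_ollama_model above first checks whether the reply is nothing but a tool-call JSON object (a name/function key and no summary-like wording). A standalone sketch of that detection heuristic, separated from the litellm plumbing; the function name is illustrative, not part of the package:

import json


def looks_like_bare_tool_call(response_text: str) -> bool:
    """Heuristic: the reply is only a tool-call JSON object, not an answer."""
    try:
        parsed = json.loads(response_text.strip())
    except (json.JSONDecodeError, TypeError):
        return False
    if not isinstance(parsed, dict):
        return False
    has_call_shape = "name" in parsed or "function" in parsed
    sounds_like_answer = any(
        word in response_text.lower()
        for word in ("summary", "option", "result", "found")
    )
    return has_call_shape and not sounds_like_answer


print(looks_like_bare_tool_call('{"name": "get_weather", "arguments": {}}'))  # True
print(looks_like_bare_tool_call("The result is 42."))                         # False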
praisonaiagents/main.py
CHANGED
@@ -3,7 +3,6 @@ import time
 import json
 import logging
 from typing import List, Optional, Dict, Any, Union, Literal, Type
-from openai import OpenAI
 from pydantic import BaseModel, ConfigDict
 from rich import print
 from rich.console import Console
@@ -377,24 +376,6 @@ class ReflectionOutput(BaseModel):
     reflection: str
     satisfactory: Literal["yes", "no"]

-# Constants
-LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
-
-# Initialize OpenAI client with proper API key handling
-api_key = os.environ.get("OPENAI_API_KEY")
-base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
-
-# For local servers like LM Studio, allow minimal API key
-if base_url and not api_key:
-    api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
-elif not api_key:
-    raise ValueError(
-        "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
-        "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
-        f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
-    )
-
-client = OpenAI(api_key=api_key, base_url=base_url)

 class TaskOutput(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
praisonaiagents/process/process.py
CHANGED
@@ -5,10 +5,10 @@ from typing import Dict, Optional, List, Any, AsyncGenerator
 from pydantic import BaseModel, ConfigDict
 from ..agent.agent import Agent
 from ..task.task import Task
-from ..main import display_error
+from ..main import display_error
 import csv
 import os
-from openai import AsyncOpenAI
+from openai import AsyncOpenAI, OpenAI

 class LoopItems(BaseModel):
     model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -140,6 +140,8 @@ class Process:

     def _get_manager_instructions_with_fallback(self, manager_task, manager_prompt, ManagerInstructions):
         """Sync version of getting manager instructions with fallback"""
+        # Create OpenAI client
+        client = OpenAI()
         try:
             # First try structured output (OpenAI compatible)
             logging.info("Attempting structured output...")
{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 praisonaiagents/__init__.py,sha256=TezvgadS1p5FGnIRAUVOB_6Jzb3Of7ZtzjtyeCqRsmM,3017
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
-praisonaiagents/main.py,sha256=
+praisonaiagents/main.py,sha256=bamnEu5PaekloGi52VqAFclm-HzjEVeKtWF0Zpdmfzs,15479
 praisonaiagents/session.py,sha256=d-CZPYikOHb0q-H9f_IWKJsypnQfz1YKeLLkyxs6oDo,15532
 praisonaiagents/agent/__init__.py,sha256=IhIDtAkfJ99cxbttwou52coih_AejS2-jpazsX6LbDY,350
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=oZaMJJXoWOWJVOFSLmnoBEpF9rb54pnvSqZHgiOhzAw,108660
 praisonaiagents/agent/handoff.py,sha256=Saq0chqfvC6Zf5UbXvmctybbehqnotrXn72JsS-76Q0,13099
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
-praisonaiagents/agents/agents.py,sha256=
-praisonaiagents/agents/autoagents.py,sha256=
+praisonaiagents/agents/agents.py,sha256=WnptTEMSDMAM30Ka6rOAu6rBD-ZLev3qphb1a3BbP1g,63301
+praisonaiagents/agents/autoagents.py,sha256=njkcv7wgDjrUd5auLL3rMc7qv20Kfo40zdn49UxWR9k,14235
 praisonaiagents/guardrails/__init__.py,sha256=HA8zhp-KRHTxo0194MUwXOUJjPyjOu7E3d7xUIKYVVY,310
 praisonaiagents/guardrails/guardrail_result.py,sha256=2K1WIYRyT_s1H6vBGa-7HEHzXCFIyZXZVY4f0hnQyWc,1352
 praisonaiagents/guardrails/llm_guardrail.py,sha256=MTTqmYDdZX-18QN9T17T5P_6H2qnV8GVgymJufW1WuM,3277
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=6lTeQ8jWi1-KiwjCDCmkHo2e-bRLq2dP0s5iJWqjO3s,1421
-praisonaiagents/llm/llm.py,sha256
+praisonaiagents/llm/llm.py,sha256=1AGCRpriOx_c3h_Bn6_DNzI1HHz0PM6jieq7RDysA68,109172
 praisonaiagents/llm/openai_client.py,sha256=0JvjCDHoH8I8kIt5vvObARkGdVaPWdTIv_FoEQ5EQPA,48973
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
@@ -24,7 +24,7 @@ praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYS
 praisonaiagents/memory/__init__.py,sha256=aEFdhgtTqDdMhc_JCWM-f4XI9cZIj7Wz5g_MUa-0amg,397
 praisonaiagents/memory/memory.py,sha256=D5BmQTktv6VOJ49yW2m1MjjCJ5UDSX1Qo46_443ymKo,44276
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
-praisonaiagents/process/process.py,sha256=
+praisonaiagents/process/process.py,sha256=NTc9rbelIdEA-S1Nwd79OVeKA7mmoZWXhD5r8S91LNs,66624
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
 praisonaiagents/task/task.py,sha256=imqJ8wzZzVyUSym2EyF2tC-vAsV1UdfI_P3YM5mqAiw,20786
 praisonaiagents/telemetry/__init__.py,sha256=5iAOrj_N_cKMmh2ltWGYs3PfOYt_jcwUoElW8fTAIsc,3062
@@ -53,7 +53,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.118.dist-info/METADATA,sha256=AmgmO7hID63By-em4e2hDiWT10KeiXDpTL39qpxCDQY,1669
+praisonaiagents-0.0.118.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.118.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.118.dist-info/RECORD,,
{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/WHEEL
File without changes
{praisonaiagents-0.0.116.dist-info → praisonaiagents-0.0.118.dist-info}/top_level.txt
File without changes