praisonaiagents 0.0.116__tar.gz → 0.0.118__tar.gz

This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
Files changed (70)
  1. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agent/agent.py +109 -10
  3. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agents/agents.py +1 -1
  4. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agents/autoagents.py +13 -1
  5. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/llm/llm.py +161 -185
  6. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/main.py +0 -19
  7. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/process/process.py +4 -2
  8. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/PKG-INFO +1 -1
  9. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/pyproject.toml +1 -1
  10. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/README.md +0 -0
  11. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/__init__.py +0 -0
  12. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agent/__init__.py +0 -0
  13. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agent/handoff.py +0 -0
  14. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agent/image_agent.py +0 -0
  15. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/agents/__init__.py +0 -0
  16. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/approval.py +0 -0
  17. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/__init__.py +0 -0
  18. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  19. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  20. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/__init__.py +0 -0
  21. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/chunking.py +0 -0
  22. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/knowledge/knowledge.py +0 -0
  23. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/llm/__init__.py +0 -0
  24. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/llm/openai_client.py +0 -0
  25. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/mcp/__init__.py +0 -0
  26. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/mcp/mcp.py +0 -0
  27. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/mcp/mcp_sse.py +0 -0
  28. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/memory/__init__.py +0 -0
  29. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/memory/memory.py +0 -0
  30. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/process/__init__.py +0 -0
  31. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/session.py +0 -0
  32. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/task/__init__.py +0 -0
  33. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/task/task.py +0 -0
  34. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/__init__.py +0 -0
  35. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/integration.py +0 -0
  36. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/telemetry/telemetry.py +0 -0
  37. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/README.md +0 -0
  38. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/__init__.py +0 -0
  39. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/arxiv_tools.py +0 -0
  40. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/calculator_tools.py +0 -0
  41. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/csv_tools.py +0 -0
  42. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/duckdb_tools.py +0 -0
  43. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  44. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/excel_tools.py +0 -0
  45. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/file_tools.py +0 -0
  46. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/json_tools.py +0 -0
  47. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/newspaper_tools.py +0 -0
  48. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/pandas_tools.py +0 -0
  49. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/python_tools.py +0 -0
  50. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/searxng_tools.py +0 -0
  51. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/shell_tools.py +0 -0
  52. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/spider_tools.py +0 -0
  53. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/test.py +0 -0
  54. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/tools.py +0 -0
  55. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  56. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  57. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/xml_tools.py +0 -0
  58. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/yaml_tools.py +0 -0
  59. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents/tools/yfinance_tools.py +0 -0
  60. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  61. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test_handoff_compatibility.py +0 -0
  68. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test_ollama_async_fix.py +0 -0
  69. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test_ollama_fix.py +0 -0
  70. {praisonaiagents-0.0.116 → praisonaiagents-0.0.118}/tests/test_posthog_fixed.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.116
+ Version: 0.0.118
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -354,8 +354,10 @@ class Agent:
  # Check for model name in environment variable if not provided
  self._using_custom_llm = False

- # Initialize OpenAI client for direct API calls
- self._openai_client = get_openai_client(api_key=api_key, base_url=base_url)
+ # Store OpenAI client parameters for lazy initialization
+ self._openai_api_key = api_key
+ self._openai_base_url = base_url
+ self.__openai_client = None

  # If base_url is provided, always create a custom LLM instance
  if base_url:
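With this change the Agent constructor no longer touches the OpenAI SDK at all; it only records api_key and base_url and defers client construction to the lazily-initialized property added in the next hunk. A minimal sketch of the same lazy-initialization pattern, independent of praisonaiagents (class and attribute names here are illustrative, not the library's API):

    # Illustrative sketch of the lazy-initialization pattern used above; not library code.
    class LazyClientHolder:
        def __init__(self, api_key=None, base_url=None):
            # The constructor only stores configuration; no client is built yet.
            self._api_key = api_key
            self._base_url = base_url
            self._client = None

        @property
        def client(self):
            # The validating construction happens on first access only.
            if self._client is None:
                from openai import OpenAI  # raises if no API key can be resolved
                self._client = OpenAI(api_key=self._api_key, base_url=self._base_url)
            return self._client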
@@ -488,6 +490,42 @@ Your Goal: {self.goal}
  for source in knowledge:
  self._process_knowledge(source)

+ @property
+ def _openai_client(self):
+ """Lazily initialize OpenAI client only when needed."""
+ if self.__openai_client is None:
+ try:
+ self.__openai_client = get_openai_client(
+ api_key=self._openai_api_key,
+ base_url=self._openai_base_url
+ )
+ except ValueError as e:
+ # If we're using a custom LLM, we might not need the OpenAI client
+ # Return None and let the calling code handle it
+ if self._using_custom_llm:
+ return None
+ else:
+ raise e
+ return self.__openai_client
+
+ @property
+ def llm_model(self):
+ """Unified property to get the LLM model regardless of configuration type.
+
+ Returns:
+ The LLM model/instance being used by this agent.
+ - For standard models: returns the model string (e.g., "gpt-4o")
+ - For custom LLM instances: returns the LLM instance object
+ - For provider models: returns the LLM instance object
+ """
+ if hasattr(self, 'llm_instance') and self.llm_instance:
+ return self.llm_instance
+ elif hasattr(self, 'llm') and self.llm:
+ return self.llm
+ else:
+ # Default fallback
+ return "gpt-4o"
+
  def _process_knowledge(self, knowledge_item):
  """Process and store knowledge from a file path, URL, or string."""
  try:
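The new llm_model property gives callers one place to ask which model an agent is using, whether llm is a plain model string or a custom LLM instance, and the OpenAI client is now only created on first access to _openai_client. A hedged usage sketch (the Agent constructor arguments shown are assumptions based on attributes referenced in this diff, not a documented signature):

    # Assumed usage; constructor arguments are illustrative.
    from praisonaiagents import Agent

    agent = Agent(name="Researcher", role="Analyst", goal="Summarise findings", llm="gpt-4o")
    print(agent.llm_model)  # -> "gpt-4o" for a plain model string
    # For a custom LLM instance or provider model, llm_model returns that instance instead,
    # and no OpenAI client is built until agent._openai_client is actually accessed.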
@@ -694,14 +732,39 @@ Your Role: {self.role}\n
  Your Goal: {self.goal}
  """

- # Use openai_client's build_messages method
- messages, original_prompt = self._openai_client.build_messages(
- prompt=prompt,
- system_prompt=system_prompt,
- chat_history=self.chat_history,
- output_json=output_json,
- output_pydantic=output_pydantic
- )
+ # Use openai_client's build_messages method if available
+ if self._openai_client is not None:
+ messages, original_prompt = self._openai_client.build_messages(
+ prompt=prompt,
+ system_prompt=system_prompt,
+ chat_history=self.chat_history,
+ output_json=output_json,
+ output_pydantic=output_pydantic
+ )
+ else:
+ # Fallback implementation for when OpenAI client is not available
+ messages = []
+
+ # Add system message if provided
+ if system_prompt:
+ messages.append({"role": "system", "content": system_prompt})
+
+ # Add chat history
+ messages.extend(self.chat_history)
+
+ # Add user prompt
+ if isinstance(prompt, list):
+ messages.extend(prompt)
+ original_prompt = prompt
+ else:
+ messages.append({"role": "user", "content": str(prompt)})
+ original_prompt = str(prompt)
+
+ # Add JSON format instruction if needed
+ if output_json or output_pydantic:
+ model = output_pydantic or output_json
+ json_instruction = f"\nPlease respond with valid JSON matching this schema: {model.model_json_schema()}"
+ messages[-1]["content"] += json_instruction

  return messages, original_prompt

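The fallback branch above builds the same chat-completion message list by hand when no OpenAI client exists. A standalone sketch of that logic (the function name and simplified signature are mine, not the library's):

    # Illustrative restatement of the fallback above; not the library's API.
    def build_messages_fallback(prompt, system_prompt=None, chat_history=None):
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(chat_history or [])
        if isinstance(prompt, list):
            # A pre-built (e.g. multimodal) message list is appended as-is.
            messages.extend(prompt)
            return messages, prompt
        messages.append({"role": "user", "content": str(prompt)})
        return messages, str(prompt)

    msgs, original = build_messages_fallback("Hello", system_prompt="You are helpful.")
    # msgs == [{"role": "system", ...}, {"role": "user", "content": "Hello"}]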
@@ -943,6 +1006,9 @@ Your Goal: {self.goal}

  def _process_stream_response(self, messages, temperature, start_time, formatted_tools=None, reasoning_steps=False):
  """Process streaming response and return final response"""
+ if self._openai_client is None:
+ raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
  return self._openai_client.process_stream_response(
  messages=messages,
  model=self.llm,
@@ -1009,6 +1075,9 @@ Your Goal: {self.goal}

  # Note: openai_client expects tools in various formats and will format them internally
  # But since we already have formatted_tools, we can pass them directly
+ if self._openai_client is None:
+ raise ValueError("OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider.")
+
  final_response = self._openai_client.chat_completion_with_tools(
  messages=messages,
  model=self.llm,
@@ -1202,6 +1271,17 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  messages.append({"role": "user", "content": reflection_prompt})

  try:
+ # Check if OpenAI client is available
+ if self._openai_client is None:
+ # For custom LLMs, self-reflection with structured output is not supported
+ if self.verbose:
+ display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+ # Return the original response without reflection
+ self.chat_history.append({"role": "user", "content": prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
+ return response_text
+
  reflection_response = self._openai_client.sync_client.beta.chat.completions.parse(
  model=self.reflect_llm if self.reflect_llm else self.llm,
  messages=messages,
@@ -1388,6 +1468,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Use the new _format_tools_for_completion helper method
  formatted_tools = self._format_tools_for_completion(tools)
+
+ # Check if OpenAI client is available
+ if self._openai_client is None:
+ error_msg = "OpenAI client is not initialized. Please provide OPENAI_API_KEY or use a custom LLM provider."
+ display_error(error_msg)
+ return None

  # Make the API call based on the type of request
  if tools:
@@ -1442,6 +1528,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ]

  try:
+ # Check if OpenAI client is available for self-reflection
+ if self._openai_client is None:
+ # For custom LLMs, self-reflection with structured output is not supported
+ if self.verbose:
+ display_self_reflection(f"Agent {self.name}: Self-reflection with structured output is not supported for custom LLM providers. Skipping reflection.", console=self.console)
+ # Return the original response without reflection
+ self.chat_history.append({"role": "user", "content": original_prompt})
+ self.chat_history.append({"role": "assistant", "content": response_text})
+ if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+ total_time = time.time() - start_time
+ logging.debug(f"Agent.achat completed in {total_time:.2f} seconds")
+ return response_text
+
  reflection_response = await self._openai_client.async_client.beta.chat.completions.parse(
  model=self.reflect_llm if self.reflect_llm else self.llm,
  messages=reflection_messages,
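Both the sync and async reflection paths above now bail out gracefully when only a custom LLM provider is configured, because structured-output reflection goes through the OpenAI beta parse endpoint. A hedged sketch of that guard pattern in isolation (the client argument and helper function stand in for the library's objects):

    from typing import Literal
    from pydantic import BaseModel

    class ReflectionOutput(BaseModel):  # mirrors the model referenced in main.py above
        reflection: str
        satisfactory: Literal["yes", "no"]

    def reflect_or_passthrough(client, messages, response_text, model="gpt-4o"):
        # Guard: with a custom (non-OpenAI) provider there is no client, so skip reflection.
        if client is None:
            return response_text
        parsed = client.beta.chat.completions.parse(
            model=model, messages=messages, response_format=ReflectionOutput
        )
        return parsed.choices[0].message.parsed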
@@ -7,7 +7,7 @@ from pydantic import BaseModel
  from rich.text import Text
  from rich.panel import Panel
  from rich.console import Console
- from ..main import display_error, TaskOutput, error_logs, client
+ from ..main import display_error, TaskOutput, error_logs
  from ..agent.agent import Agent
  from ..task.task import Task
  from ..process.process import Process, LoopItems
@@ -12,7 +12,8 @@ from typing import List, Any, Optional, Dict, Tuple
  import logging
  import os
  from pydantic import BaseModel, ConfigDict
- from ..main import display_instruction, display_tool_call, display_interaction, client
+ from ..main import display_instruction, display_tool_call, display_interaction
+ from ..llm import get_openai_client

  # Define Pydantic models for structured output
  class TaskConfig(BaseModel):
@@ -237,6 +238,17 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
  """

  try:
+ # Get OpenAI client
+ try:
+ client = get_openai_client()
+ except ValueError as e:
+ # AutoAgents requires OpenAI for structured output generation
+ raise ValueError(
+ "AutoAgents requires OpenAI API for automatic agent generation. "
+ "Please set OPENAI_API_KEY environment variable or use PraisonAIAgents class directly "
+ "with manually configured agents for non-OpenAI providers."
+ ) from e
+
  response = client.beta.chat.completions.parse(
  model=self.llm,
  response_format=AutoAgentsConfig,
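AutoAgents now fails fast with an explanatory ValueError when no OpenAI-compatible key is configured, instead of crashing at import time of main.py. A hedged usage sketch (the import path is taken from this diff; the constructor argument is an assumption, not a documented signature):

    import os
    from praisonaiagents.agents.autoagents import AutoAgents  # path taken from this diff

    if not os.environ.get("OPENAI_API_KEY"):
        # In 0.0.118 this situation raises a ValueError explaining the requirement
        # rather than failing when praisonaiagents.main is imported.
        print("Set OPENAI_API_KEY before using AutoAgents, or configure agents manually.")
    else:
        auto = AutoAgents(instructions="Build a small research crew")  # constructor args assumed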
@@ -813,99 +813,56 @@ class LLM:
  # Make one more call to get the final summary response
  # Special handling for Ollama models that don't automatically process tool results
  ollama_handled = False
- if self.model and self.model.startswith("ollama/") and tool_results:
- # For Ollama models, we need to explicitly ask the model to process the tool results
- # First, check if the response is just a JSON tool call
- try:
- # If the response_text is a valid JSON that looks like a tool call,
- # we need to make a follow-up call to process the results
- json_response = json.loads(response_text.strip())
- if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
- logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
-
- # Create a prompt that asks the model to process the tool results based on original context
- # Extract the original user query from messages
- original_query = ""
- for msg in reversed(messages): # Look from the end to find the most recent user message
- if msg.get("role") == "user":
- content = msg.get("content", "")
- # Handle list content (multimodal)
- if isinstance(content, list):
- for item in content:
- if isinstance(item, dict) and item.get("type") == "text":
- original_query = item.get("text", "")
- break
- else:
- original_query = content
- if original_query:
- break
-
- # Create a shorter follow-up prompt with all tool results
- # If there's only one result, use it directly; otherwise combine them
- if len(tool_results) == 1:
- results_text = json.dumps(tool_results[0], indent=2)
- else:
- results_text = json.dumps(tool_results, indent=2)
-
- follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
- logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
- logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
- # Make a follow-up call to process the results
- follow_up_messages = [
- {"role": "user", "content": follow_up_prompt}
- ]
-
- # Get response with streaming
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip()
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- original_prompt,
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
+ ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+ if ollama_params:
+ # Get response with streaming
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
  )
-
- # Return the final response after processing Ollama's follow-up
- if final_response_text:
- return final_response_text
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
- except (json.JSONDecodeError, KeyError):
- # Not a JSON response or not a tool call format, continue normally
- pass
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ live.update(display_generating(response_text, start_time))
+ else:
+ response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ ollama_params["original_prompt"],
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

  # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
  if reasoning_steps and not ollama_handled:
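The sync path above (and the async path in the next hunk) now delegates detection to the new _handle_ollama_model helper and reuses its follow_up_messages / original_prompt dictionary. A compressed sketch of how that return value is consumed, non-streaming for brevity and with the parameter plumbing simplified relative to the code above:

    # Simplified consumption of the helper's return value; not the exact library code.
    import litellm

    def finish_ollama_followup(llm, response_text, tool_results, messages, original_prompt):
        ollama_params = llm._handle_ollama_model(response_text, tool_results, messages, original_prompt)
        if not ollama_params:
            return None  # no special handling needed
        result = litellm.completion(
            model=llm.model,
            messages=ollama_params["follow_up_messages"],
            stream=False,  # the real code streams and updates a live display
        )
        return result.choices[0].message.content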
@@ -1480,99 +1437,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Special handling for Ollama models that don't automatically process tool results
  ollama_handled = False
- if self._is_ollama_provider() and tool_results:
- # For Ollama models, we need to explicitly ask the model to process the tool results
- # First, check if the response is just a JSON tool call
- try:
- # If the response_text is a valid JSON that looks like a tool call,
- # we need to make a follow-up call to process the results
- json_response = json.loads(response_text.strip())
- if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
- logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
-
- # Create a prompt that asks the model to process the tool results based on original context
- # Extract the original user query from messages
- original_query = ""
- for msg in reversed(messages): # Look from the end to find the most recent user message
- if msg.get("role") == "user":
- content = msg.get("content", "")
- # Handle list content (multimodal)
- if isinstance(content, list):
- for item in content:
- if isinstance(item, dict) and item.get("type") == "text":
- original_query = item.get("text", "")
- break
- else:
- original_query = content
- if original_query:
- break
-
- # Create a shorter follow-up prompt with all tool results
- # If there's only one result, use it directly; otherwise combine them
- if len(tool_results) == 1:
- results_text = json.dumps(tool_results[0], indent=2)
- else:
- results_text = json.dumps(tool_results, indent=2)
-
- follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
- logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
- logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
- # Make a follow-up call to process the results
- follow_up_messages = [
- {"role": "user", "content": follow_up_prompt}
- ]
-
- # Get response with streaming
- if verbose:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- print("\033[K", end="\r")
- print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
- else:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip()
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- original_prompt,
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
-
- # Return the final response after processing Ollama's follow-up
- if final_response_text:
- return final_response_text
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
- except (json.JSONDecodeError, KeyError):
- # Not a JSON response or not a tool call format, continue normally
- pass
+ ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+ if ollama_params:
+ # Get response with streaming
+ if verbose:
+ response_text = ""
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ print("\033[K", end="\r")
+ print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+ else:
+ response_text = ""
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ ollama_params["original_prompt"],
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

  # If no special handling was needed or if it's not an Ollama model
  if reasoning_steps and not ollama_handled:
@@ -1839,6 +1753,68 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  litellm.callbacks = events

+ def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
+ """
+ Handle special Ollama model requirements when processing tool results.
+
+ Args:
+ response_text: The initial response text from the model
+ tool_results: List of tool execution results
+ messages: The conversation messages list
+ original_prompt: The original user prompt
+
+ Returns:
+ Dict with follow-up parameters if Ollama needs special handling, None otherwise
+ """
+ if not self._is_ollama_provider() or not tool_results:
+ return None
+
+ # Check if the response is just a JSON tool call
+ try:
+ json_response = json.loads(response_text.strip())
+ if not (('name' in json_response or 'function' in json_response) and
+ not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
+ return None
+
+ logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
+
+ # Extract the original user query from messages
+ original_query = ""
+ for msg in reversed(messages): # Look from the end to find the most recent user message
+ if msg.get("role") == "user":
+ content = msg.get("content", "")
+ # Handle list content (multimodal)
+ if isinstance(content, list):
+ for item in content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ original_query = item.get("text", "")
+ break
+ else:
+ original_query = content
+ if original_query:
+ break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+ results_text = json.dumps(tool_results[0], indent=2)
+ else:
+ results_text = json.dumps(tool_results, indent=2)
+
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
+
+ # Return parameters for follow-up call
+ return {
+ "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
+ "original_prompt": original_prompt
+ }
+
+ except (json.JSONDecodeError, KeyError):
+ # Not a JSON response or not a tool call format
+ return None
+

  def _build_completion_params(self, **override_params) -> Dict[str, Any]:
  """Build parameters for litellm completion calls with all necessary config"""
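The helper's detection heuristic is simple: if the provider is Ollama and the model's reply parses as a bare tool-call JSON object (it has a 'name' or 'function' key and none of the summary-style keywords), a follow-up prompt is built from the tool results. A standalone sketch of just that check (the function name is mine, not the library's):

    import json

    # Illustrative restatement of the detection heuristic above; not the library's API.
    def looks_like_bare_tool_call(response_text: str) -> bool:
        try:
            data = json.loads(response_text.strip())
        except json.JSONDecodeError:
            return False
        has_call_shape = isinstance(data, dict) and ('name' in data or 'function' in data)
        has_summary_words = any(w in response_text.lower() for w in ['summary', 'option', 'result', 'found'])
        return has_call_shape and not has_summary_words

    print(looks_like_bare_tool_call('{"name": "get_weather", "arguments": {"city": "Paris"}}'))  # True
    print(looks_like_bare_tool_call('The answer to your question is 42.'))  # False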
@@ -3,7 +3,6 @@ import time
  import json
  import logging
  from typing import List, Optional, Dict, Any, Union, Literal, Type
- from openai import OpenAI
  from pydantic import BaseModel, ConfigDict
  from rich import print
  from rich.console import Console
@@ -377,24 +376,6 @@ class ReflectionOutput(BaseModel):
  reflection: str
  satisfactory: Literal["yes", "no"]

- # Constants
- LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
-
- # Initialize OpenAI client with proper API key handling
- api_key = os.environ.get("OPENAI_API_KEY")
- base_url = os.environ.get("OPENAI_API_BASE") or os.environ.get("OPENAI_BASE_URL")
-
- # For local servers like LM Studio, allow minimal API key
- if base_url and not api_key:
- api_key = LOCAL_SERVER_API_KEY_PLACEHOLDER
- elif not api_key:
- raise ValueError(
- "OPENAI_API_KEY environment variable is required for the default OpenAI service. "
- "If you are targeting a local server (e.g., LM Studio), ensure OPENAI_API_BASE is set "
- f"(e.g., 'http://localhost:1234/v1') and you can use a placeholder API key by setting OPENAI_API_KEY='{LOCAL_SERVER_API_KEY_PLACEHOLDER}'"
- )
-
- client = OpenAI(api_key=api_key, base_url=base_url)

  class TaskOutput(BaseModel):
  model_config = ConfigDict(arbitrary_types_allowed=True)
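With the module-level client removed above, importing praisonaiagents.main no longer requires OPENAI_API_KEY to be set. Code that previously did `from praisonaiagents.main import client` needs to obtain a client explicitly; a hedged sketch of the replacement, using the get_openai_client helper this diff imports elsewhere (the public import path is assumed from the `..llm` relative import shown above):

    # Before (0.0.116): importing main eagerly built a client and could raise at import time.
    # from praisonaiagents.main import client

    # After (0.0.118): request a client only where it is actually needed.
    from praisonaiagents.llm import get_openai_client  # import path assumed from '..llm' in this diff

    try:
        client = get_openai_client()
    except ValueError:
        client = None  # e.g. when only a custom/non-OpenAI provider is configured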
@@ -5,10 +5,10 @@ from typing import Dict, Optional, List, Any, AsyncGenerator
  from pydantic import BaseModel, ConfigDict
  from ..agent.agent import Agent
  from ..task.task import Task
- from ..main import display_error, client
+ from ..main import display_error
  import csv
  import os
- from openai import AsyncOpenAI
+ from openai import AsyncOpenAI, OpenAI

  class LoopItems(BaseModel):
  model_config = ConfigDict(arbitrary_types_allowed=True)
@@ -140,6 +140,8 @@ class Process:

  def _get_manager_instructions_with_fallback(self, manager_task, manager_prompt, ManagerInstructions):
  """Sync version of getting manager instructions with fallback"""
+ # Create OpenAI client
+ client = OpenAI()
  try:
  # First try structured output (OpenAI compatible)
  logging.info("Attempting structured output...")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.116
+ Version: 0.0.118
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.116"
+ version = "0.0.118"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [