praisonaiagents 0.0.123__py3-none-any.whl → 0.0.125__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -500,4 +500,4 @@ DO NOT use strings for tasks. Each task MUST be a complete object with all four
  Creates tasks based on the instructions, then starts execution.
  Returns the task status and results dictionary.
  """
- return super().start()
+ return super().start()
@@ -67,9 +67,6 @@ class Knowledge:
 
  # Disable OpenAI API request logging
  logging.getLogger('openai').setLevel(logging.WARNING)
-
- # Set root logger to warning to catch any uncategorized logs
- logging.getLogger().setLevel(logging.WARNING)

  @cached_property
  def _deps(self):
@@ -5,19 +5,16 @@ import os
  # Disable litellm telemetry before any imports
  os.environ["LITELLM_TELEMETRY"] = "False"

- # Suppress all relevant logs at module level
- logging.getLogger("litellm").setLevel(logging.ERROR)
- logging.getLogger("openai").setLevel(logging.ERROR)
- logging.getLogger("httpx").setLevel(logging.ERROR)
- logging.getLogger("httpcore").setLevel(logging.ERROR)
- logging.getLogger("pydantic").setLevel(logging.ERROR)
+ # Suppress all relevant logs at module level - consistent with main __init__.py
+ logging.getLogger("litellm").setLevel(logging.WARNING)
+ logging.getLogger("openai").setLevel(logging.WARNING)
+ logging.getLogger("httpx").setLevel(logging.WARNING)
+ logging.getLogger("httpcore").setLevel(logging.WARNING)
+ logging.getLogger("pydantic").setLevel(logging.WARNING)

  # Suppress pydantic warnings
  warnings.filterwarnings("ignore", category=UserWarning, module="pydantic")

- # Configure logging to suppress all INFO messages
- logging.basicConfig(level=logging.WARNING)
-
  # Import after suppressing warnings
  from .llm import LLM, LLMContextLengthExceededException
  from .openai_client import (
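
Because these loggers are now pinned to WARNING at import time, an application that wants more verbose output has to raise the levels again after importing the package. A minimal sketch using only the standard library; the logger names come from the hunk above, and whether your entry point imports this submodule directly or via the top-level package is an assumption:

    import logging
    import praisonaiagents  # importing the package applies the WARNING defaults shown above

    # Opt selected third-party libraries back in to verbose output:
    for name in ("litellm", "openai", "httpx"):
        logging.getLogger(name).setLevel(logging.DEBUG)
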
@@ -406,7 +406,7 @@ class LLM:
  # missing tool calls or making duplicate calls
  return False

- def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
+ def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None, tools=None):
  """Build messages list for LLM completion. Works for both sync and async.

  Args:
@@ -415,6 +415,7 @@ class LLM:
  chat_history: Optional list of previous messages
  output_json: Optional Pydantic model for JSON output
  output_pydantic: Optional Pydantic model for JSON output (alias)
+ tools: Optional list of tools available

  Returns:
  tuple: (messages list, original prompt)
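
For orientation, a hypothetical call against the extended signature. The LLM construction and the tool function are assumptions; only the parameter names and the documented return tuple come from the diff:

    from praisonaiagents.llm import LLM  # exported above via `from .llm import LLM`

    def get_weather(city: str) -> str:
        """Toy tool used purely for illustration."""
        return f"Sunny in {city}"

    llm = LLM(model="gpt-4o-mini")  # constructor arguments are an assumption
    messages, original_prompt = llm._build_messages(
        prompt="What's the weather in Paris?",
        system_prompt="You are a helpful assistant.",
        chat_history=[],
        tools=[get_weather],  # new optional parameter in 0.0.125
    )
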
@@ -863,32 +864,44 @@ class LLM:
  ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)

  if ollama_params:
- # Get response with streaming
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ # Get response based on streaming mode
+ if stream:
+ # Streaming approach
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=True
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ live.update(display_generating(response_text, start_time))
+ else:
  response_text = ""
  for chunk in litellm.completion(
  **self._build_completion_params(
  messages=ollama_params["follow_up_messages"],
  temperature=temperature,
- stream=stream
+ stream=True
  )
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
+ response_text += chunk.choices[0].delta.content
  else:
- response_text = ""
- for chunk in litellm.completion(
+ # Non-streaming approach
+ resp = litellm.completion(
  **self._build_completion_params(
  messages=ollama_params["follow_up_messages"],
  temperature=temperature,
- stream=stream
+ stream=False
  )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
+ )
+ response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""

  # Set flag to indicate Ollama was handled
  ollama_handled = True
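
The verbose branches above drive a rich Live display while chunks arrive. A generic sketch of that pattern, substituting a plain Text renderable for the internal display_generating helper and canned strings for streamed chunks:

    import time
    from rich.live import Live
    from rich.text import Text

    streamed = ""
    with Live(Text(streamed), refresh_per_second=4) as live:
        for token in ("Model ", "output ", "arrives ", "in ", "chunks."):  # stand-in chunks
            streamed += token
            live.update(Text(streamed))  # re-render the accumulated text in place
            time.sleep(0.2)
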
@@ -944,9 +957,26 @@ class LLM:
 
  # Otherwise do the existing streaming approach if not already handled
  elif not ollama_handled:
- # Get response after tool calls with streaming
- if verbose:
- with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+ # Get response after tool calls
+ if stream:
+ # Streaming approach
+ if verbose:
+ with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
+ final_response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=messages,
+ tools=formatted_tools,
+ temperature=temperature,
+ stream=True,
+ **kwargs
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ final_response_text += content
+ live.update(display_generating(final_response_text, current_time))
+ else:
  final_response_text = ""
  for chunk in litellm.completion(
  **self._build_completion_params(
@@ -958,22 +988,19 @@ class LLM:
  )
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- final_response_text += content
- live.update(display_generating(final_response_text, current_time))
+ final_response_text += chunk.choices[0].delta.content
  else:
- final_response_text = ""
- for chunk in litellm.completion(
+ # Non-streaming approach
+ resp = litellm.completion(
  **self._build_completion_params(
  messages=messages,
  tools=formatted_tools,
  temperature=temperature,
- stream=stream,
+ stream=False,
  **kwargs
  )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- final_response_text += chunk.choices[0].delta.content
+ )
+ final_response_text = resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""

  final_response_text = final_response_text.strip()
 
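
Both follow-up paths now honour the caller's stream flag instead of always streaming. Condensed, the two retrieval modes look like this; the access patterns mirror the hunks above, while the model name and helper name are only examples:

    import litellm

    def completion_text(messages, model="gpt-4o-mini", stream=True):
        """Return the completion text for either retrieval mode (example model name)."""
        if stream:
            text = ""
            for chunk in litellm.completion(model=model, messages=messages, stream=True):
                # Streaming: accumulate the incremental delta content chunk by chunk
                if chunk and chunk.choices and chunk.choices[0].delta.content:
                    text += chunk.choices[0].delta.content
            return text
        # Non-streaming: read the full message from the single response object
        resp = litellm.completion(model=model, messages=messages, stream=False)
        return resp.get("choices", [{}])[0].get("message", {}).get("content", "") or ""
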
@@ -1858,6 +1885,21 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Override with any provided parameters
  params.update(override_params)

+ # Add tool_choice="auto" when tools are provided (unless already specified)
+ if 'tools' in params and params['tools'] and 'tool_choice' not in params:
+ # For Gemini models, use tool_choice to encourage tool usage
+ # More comprehensive Gemini model detection
+ if any(prefix in self.model.lower() for prefix in ['gemini', 'gemini/', 'google/gemini']):
+ try:
+ import litellm
+ # Check if model supports function calling before setting tool_choice
+ if litellm.supports_function_calling(model=self.model):
+ params['tool_choice'] = 'auto'
+ except Exception as e:
+ # If check fails, still set tool_choice for known Gemini models
+ logging.debug(f"Could not verify function calling support: {e}. Setting tool_choice anyway.")
+ params['tool_choice'] = 'auto'
+
  return params

  def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
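
The Gemini branch leans on litellm's capability lookup before forcing tool_choice. A standalone version of that check, with the model name only as an example:

    import litellm

    model = "gemini/gemini-1.5-flash"  # example model name
    if litellm.supports_function_calling(model=model):
        # Mirrors the new behaviour: tool_choice="auto" is only added when tools are
        # present and the model reports function-calling support.
        print(f"{model} supports function calling; tool_choice='auto' would be applied")
    else:
        print(f"{model} does not report function-calling support")
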
praisonaiagents/main.py CHANGED
@@ -9,32 +9,15 @@ from rich.console import Console
  from rich.panel import Panel
  from rich.text import Text
  from rich.markdown import Markdown
- from rich.logging import RichHandler
  from rich.live import Live
  import asyncio

- # # Configure root logger
- # logging.basicConfig(level=logging.WARNING)
-
- # Suppress litellm logs
+ # Logging is already configured in __init__.py, just clean up handlers for litellm
  logging.getLogger("litellm").handlers = []
  logging.getLogger("litellm.utils").handlers = []
  logging.getLogger("litellm").propagate = False
  logging.getLogger("litellm.utils").propagate = False

- LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
-
- logging.basicConfig(
- level=getattr(logging, LOGLEVEL, logging.INFO),
- format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
- datefmt="[%X]",
- handlers=[RichHandler(rich_tracebacks=True)]
- )
-
- # Add these lines to suppress markdown parser debug logs
- logging.getLogger('markdown_it').setLevel(logging.WARNING)
- logging.getLogger('rich.markdown').setLevel(logging.WARNING)
-
  # Global list to store error logs
  error_logs = []
 
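
Applications that relied on the removed LOGLEVEL/RichHandler configuration can reproduce it on their own side. A sketch reusing the same stdlib and rich calls as the deleted block, with force=True so it overrides whatever the library configured at import time:

    import logging
    import os
    from rich.logging import RichHandler

    LOGLEVEL = os.environ.get("LOGLEVEL", "INFO").upper()
    logging.basicConfig(
        level=getattr(logging, LOGLEVEL, logging.INFO),
        format="%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s",
        datefmt="[%X]",
        handlers=[RichHandler(rich_tracebacks=True)],
        force=True,  # replace any handlers installed before this call
    )
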
@@ -187,15 +187,45 @@ class MCP:
  self.timeout = timeout
  self.debug = debug

- # Check if this is an SSE URL
+ # Check if this is an HTTP URL
  if isinstance(command_or_string, str) and re.match(r'^https?://', command_or_string):
- # Import the SSE client implementation
- from .mcp_sse import SSEMCPClient
- self.sse_client = SSEMCPClient(command_or_string, debug=debug, timeout=timeout)
- self._tools = list(self.sse_client.tools)
- self.is_sse = True
- self.is_npx = False
- return
+ # Determine transport type based on URL or kwargs
+ if command_or_string.endswith('/sse') and 'transport_type' not in kwargs:
+ # Legacy SSE URL - use SSE transport for backward compatibility
+ from .mcp_sse import SSEMCPClient
+ self.sse_client = SSEMCPClient(command_or_string, debug=debug, timeout=timeout)
+ self._tools = list(self.sse_client.tools)
+ self.is_sse = True
+ self.is_http_stream = False
+ self.is_npx = False
+ return
+ else:
+ # Use HTTP Stream transport for all other HTTP URLs
+ from .mcp_http_stream import HTTPStreamMCPClient
+ # Extract transport options from kwargs
+ transport_options = {}
+ if 'responseMode' in kwargs:
+ transport_options['responseMode'] = kwargs.pop('responseMode')
+ if 'headers' in kwargs:
+ transport_options['headers'] = kwargs.pop('headers')
+ if 'cors' in kwargs:
+ transport_options['cors'] = kwargs.pop('cors')
+ if 'session' in kwargs:
+ transport_options['session'] = kwargs.pop('session')
+ if 'resumability' in kwargs:
+ transport_options['resumability'] = kwargs.pop('resumability')
+
+ self.http_stream_client = HTTPStreamMCPClient(
+ command_or_string,
+ debug=debug,
+ timeout=timeout,
+ options=transport_options
+ )
+ self._tools = list(self.http_stream_client.tools)
+ self.is_sse = False
+ self.is_http_stream = True
+ self.is_npx = False
+ return

  # Handle the single string format for stdio client
  if isinstance(command_or_string, str) and args is None:
@@ -219,6 +249,7 @@ class MCP:
 
  # Set up stdio client
  self.is_sse = False
+ self.is_http_stream = False

  # Ensure UTF-8 encoding in environment for Docker compatibility
  env = kwargs.get('env', {})
@@ -275,6 +306,9 @@ class MCP:
  """
  if self.is_sse:
  return list(self.sse_client.tools)
+
+ if self.is_http_stream:
+ return list(self.http_stream_client.tools)

  tool_functions = []
 
@@ -448,6 +482,10 @@ class MCP:
  if self.is_sse and hasattr(self, 'sse_client') and self.sse_client.tools:
  # Return all tools from SSE client
  return self.sse_client.to_openai_tools()
+
+ if self.is_http_stream and hasattr(self, 'http_stream_client') and self.http_stream_client.tools:
+ # Return all tools from HTTP Stream client
+ return self.http_stream_client.to_openai_tools()

  # For simplicity, we'll convert the first tool only if multiple exist
  # More complex implementations could handle multiple tools
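
Taken together, the MCP changes pick a transport from the URL shape: an HTTP(S) URL ending in /sse keeps the legacy SSE client, anything else goes through the new HTTP Stream client, and the listed keyword arguments are forwarded as transport options. A hypothetical usage sketch; the import path, endpoint URLs, header value, and responseMode value are assumptions, while the keyword names come from the constructor hunk above:

    from praisonaiagents.mcp import MCP  # import path assumed from the package layout

    # URL ending in /sse keeps the legacy SSE transport:
    sse_mcp = MCP("http://localhost:8080/sse")

    # Any other HTTP(S) URL selects the HTTP Stream transport; these keyword
    # arguments are the ones popped into transport_options in __init__:
    stream_mcp = MCP(
        "http://localhost:8080/mcp",                  # example endpoint
        headers={"Authorization": "Bearer <token>"},  # example header
        responseMode="stream",                        # example value
        timeout=60,
    )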