praisonaiagents 0.0.120__tar.gz → 0.0.121__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agents/autoagents.py +15 -8
  3. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/llm/llm.py +137 -120
  4. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents.egg-info/PKG-INFO +1 -1
  5. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/pyproject.toml +1 -1
  6. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/README.md +0 -0
  7. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/__init__.py +0 -0
  8. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agent/__init__.py +0 -0
  9. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agent/agent.py +0 -0
  10. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agent/handoff.py +0 -0
  11. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agent/image_agent.py +0 -0
  12. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agents/__init__.py +0 -0
  13. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/agents/agents.py +0 -0
  14. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/approval.py +0 -0
  15. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/guardrails/__init__.py +0 -0
  16. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  17. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  18. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/knowledge/__init__.py +0 -0
  19. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/knowledge/chunking.py +0 -0
  20. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/knowledge/knowledge.py +0 -0
  21. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/llm/__init__.py +0 -0
  22. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/llm/openai_client.py +0 -0
  23. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/main.py +0 -0
  24. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/mcp/__init__.py +0 -0
  25. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/mcp/mcp.py +0 -0
  26. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/mcp/mcp_sse.py +0 -0
  27. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/memory/__init__.py +0 -0
  28. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/memory/memory.py +0 -0
  29. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/process/__init__.py +0 -0
  30. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/process/process.py +0 -0
  31. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/session.py +0 -0
  32. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/task/__init__.py +0 -0
  33. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/task/task.py +0 -0
  34. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/telemetry/__init__.py +0 -0
  35. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/telemetry/integration.py +0 -0
  36. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/telemetry/telemetry.py +0 -0
  37. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/README.md +0 -0
  38. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/__init__.py +0 -0
  39. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/arxiv_tools.py +0 -0
  40. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/calculator_tools.py +0 -0
  41. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/csv_tools.py +0 -0
  42. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/duckdb_tools.py +0 -0
  43. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  44. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/excel_tools.py +0 -0
  45. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/file_tools.py +0 -0
  46. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/json_tools.py +0 -0
  47. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/newspaper_tools.py +0 -0
  48. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/pandas_tools.py +0 -0
  49. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/python_tools.py +0 -0
  50. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/searxng_tools.py +0 -0
  51. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/shell_tools.py +0 -0
  52. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/spider_tools.py +0 -0
  53. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/test.py +0 -0
  54. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/tools.py +0 -0
  55. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  56. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  57. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/xml_tools.py +0 -0
  58. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/yaml_tools.py +0 -0
  59. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents/tools/yfinance_tools.py +0 -0
  60. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  61. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test_handoff_compatibility.py +0 -0
  68. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test_ollama_async_fix.py +0 -0
  69. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test_ollama_fix.py +0 -0
  70. {praisonaiagents-0.0.120 → praisonaiagents-0.0.121}/tests/test_posthog_fixed.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.120
+Version: 0.0.121
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents/agents/autoagents.py
@@ -13,7 +13,7 @@ import logging
 import os
 from pydantic import BaseModel, ConfigDict
 from ..main import display_instruction, display_tool_call, display_interaction
-from ..llm import get_openai_client, LLM
+from ..llm import get_openai_client, LLM, OpenAIClient
 import json

 # Define Pydantic models for structured output
@@ -109,6 +109,8 @@ class AutoAgents(PraisonAIAgents):
         self.max_execution_time = max_execution_time
         self.max_iter = max_iter
         self.reflect_llm = reflect_llm
+        self.base_url = base_url
+        self.api_key = api_key

         # Display initial instruction
         if self.verbose:
@@ -246,7 +248,11 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
         try:
             # Check if we have OpenAI API and the model supports structured output
             if self.llm and (self.llm.startswith('gpt-') or self.llm.startswith('o1-') or self.llm.startswith('o3-')):
-                client = get_openai_client()
+                # Create a new client instance if custom parameters are provided
+                if self.api_key or self.base_url:
+                    client = OpenAIClient(api_key=self.api_key, base_url=self.base_url)
+                else:
+                    client = get_openai_client()
                 use_openai_structured = True
         except:
             # If OpenAI client is not available, we'll use the LLM class
@@ -254,15 +260,14 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf

         if use_openai_structured and client:
             # Use OpenAI's structured output for OpenAI models (backward compatibility)
-            response = client.beta.chat.completions.parse(
-                model=self.llm,
-                response_format=AutoAgentsConfig,
+            config = client.parse_structured_output(
                 messages=[
                     {"role": "system", "content": "You are a helpful assistant designed to generate AI agent configurations."},
                     {"role": "user", "content": prompt}
-                ]
+                ],
+                response_format=AutoAgentsConfig,
+                model=self.llm
             )
-            config = response.choices[0].message.parsed
         else:
             # Use LLM class for all other providers (Gemini, Anthropic, etc.)
             llm_instance = LLM(
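The hunk above swaps the raw `client.beta.chat.completions.parse(...)` call (and the manual `response.choices[0].message.parsed` unwrap) for the `OpenAIClient.parse_structured_output(...)` wrapper. A minimal sketch of what such a wrapper looks like, assuming the standard openai SDK; the shipped method lives in praisonaiagents/llm/openai_client.py and may differ in detail:

# Hypothetical sketch of a parse_structured_output-style wrapper; not the
# shipped implementation, just the pattern the refactor encapsulates.
from typing import Dict, List, Type
from pydantic import BaseModel
from openai import OpenAI

class OpenAIClientSketch:
    def __init__(self, api_key: str = None, base_url: str = None):
        # The openai SDK falls back to OPENAI_API_KEY / OPENAI_BASE_URL env vars
        self._client = OpenAI(api_key=api_key, base_url=base_url)

    def parse_structured_output(self, messages: List[Dict], response_format: Type[BaseModel], model: str) -> BaseModel:
        """Run a chat completion and return the parsed Pydantic instance."""
        response = self._client.beta.chat.completions.parse(
            model=model,
            messages=messages,
            response_format=response_format,
        )
        return response.choices[0].message.parsed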
@@ -350,7 +355,9 @@ Return the configuration in a structured JSON format matching the AutoAgentsConf
                 max_rpm=self.max_rpm,
                 max_execution_time=self.max_execution_time,
                 max_iter=self.max_iter,
-                reflect_llm=self.reflect_llm
+                reflect_llm=self.reflect_llm,
+                base_url=self.base_url,
+                api_key=self.api_key
             )
             agents.append(agent)

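Taken together, the autoagents.py changes thread `api_key` and `base_url` from the AutoAgents constructor through to both the structured-output client and every generated Agent. A hedged usage sketch (only the api_key/base_url plumbing is what this release adds; the other arguments are illustrative placeholders):

# Sketch: pointing AutoAgents at an OpenAI-compatible endpoint.
# Argument values besides api_key/base_url are illustrative placeholders.
from praisonaiagents import AutoAgents

agents = AutoAgents(
    instructions="Research the latest developments in quantum computing",
    llm="gpt-4o-mini",
    api_key="sk-...",                     # forwarded to OpenAIClient(...)
    base_url="http://localhost:8000/v1",  # e.g. a vLLM or LiteLLM proxy
)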
praisonaiagents/llm/llm.py
@@ -746,6 +746,7 @@ class LLM:
                 )
             if delta.content:
                 live.update(display_generating(response_text, current_time))
+
         else:
             # Non-verbose streaming
             for chunk in litellm.completion(
@@ -759,9 +760,12 @@
     ):
         if chunk and chunk.choices and chunk.choices[0].delta:
             delta = chunk.choices[0].delta
-            response_text, tool_calls = self._process_stream_delta(
-                delta, response_text, tool_calls, formatted_tools
-            )
+            if delta.content:
+                response_text += delta.content
+
+            # Capture tool calls from streaming chunks if provider supports it
+            if formatted_tools and self._supports_streaming_tools():
+                tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

 response_text = response_text.strip()
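The non-verbose streaming path now splits content accumulation from tool-call accumulation. For context, OpenAI-style streaming deltas deliver tool calls as indexed fragments whose arguments string arrives in pieces; a standalone sketch of the accumulation pattern that the new `_process_tool_calls_from_stream` helper (defined later in this diff) implements:

# Sketch of OpenAI-style streaming tool-call accumulation: each delta fragment
# carries an index; names/arguments are concatenated across chunks.
def accumulate_tool_calls(delta, tool_calls):
    if getattr(delta, "tool_calls", None):
        for tc in delta.tool_calls:
            if tc.index >= len(tool_calls):
                # First fragment for this slot: create a skeleton entry
                tool_calls.append({"id": tc.id, "type": "function",
                                   "function": {"name": "", "arguments": ""}})
            if tc.function.name:
                tool_calls[tc.index]["function"]["name"] = tc.function.name
            if tc.function.arguments:
                # Arguments stream in pieces, e.g. '{"ci' then 'ty": "Paris"}'
                tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
    return tool_calls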
@@ -802,20 +806,7 @@
 # Handle tool calls - Sequential tool calling logic
 if tool_calls and execute_tool_fn:
     # Convert tool_calls to a serializable format for all providers
-    serializable_tool_calls = []
-    for tc in tool_calls:
-        if isinstance(tc, dict):
-            serializable_tool_calls.append(tc)  # Already a dict
-        else:
-            # Convert object to dict
-            serializable_tool_calls.append({
-                "id": tc.id,
-                "type": getattr(tc, 'type', "function"),
-                "function": {
-                    "name": tc.function.name,
-                    "arguments": tc.function.arguments
-                }
-            })
+    serializable_tool_calls = self._serialize_tool_calls(tool_calls)
     messages.append({
         "role": "assistant",
         "content": response_text,
@@ -826,20 +817,8 @@
 tool_results = []  # Store all tool results
 for tool_call in tool_calls:
     # Handle both object and dict access patterns
-    if isinstance(tool_call, dict):
-        is_ollama = self._is_ollama_provider()
-        function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
-    else:
-        # Handle object-style tool calls
-        try:
-            function_name = tool_call.function.name
-            arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
-            tool_call_id = tool_call.id
-        except (json.JSONDecodeError, AttributeError) as e:
-            logging.error(f"Error parsing object-style tool call: {e}")
-            function_name = "unknown_function"
-            arguments = {}
-            tool_call_id = f"tool_{id(tool_call)}"
+    is_ollama = self._is_ollama_provider()
+    function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

     logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
     tool_result = execute_tool_fn(function_name, arguments)
@@ -1367,6 +1346,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             if delta.content:
                 print("\033[K", end="\r")
                 print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
+
         else:
             # Non-verbose streaming
             async for chunk in await litellm.acompletion(
@@ -1380,9 +1360,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
     ):
         if chunk and chunk.choices and chunk.choices[0].delta:
             delta = chunk.choices[0].delta
-            response_text, tool_calls = self._process_stream_delta(
-                delta, response_text, tool_calls, formatted_tools
-            )
+            if delta.content:
+                response_text += delta.content
+
+            # Capture tool calls from streaming chunks if provider supports it
+            if formatted_tools and self._supports_streaming_tools():
+                tool_calls = self._process_tool_calls_from_stream(delta, tool_calls)

 response_text = response_text.strip()
@@ -1417,20 +1400,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

 if tool_calls:
     # Convert tool_calls to a serializable format for all providers
-    serializable_tool_calls = []
-    for tc in tool_calls:
-        if isinstance(tc, dict):
-            serializable_tool_calls.append(tc)  # Already a dict
-        else:
-            # Convert object to dict
-            serializable_tool_calls.append({
-                "id": tc.id,
-                "type": getattr(tc, 'type', "function"),
-                "function": {
-                    "name": tc.function.name,
-                    "arguments": tc.function.arguments
-                }
-            })
+    serializable_tool_calls = self._serialize_tool_calls(tool_calls)
     messages.append({
         "role": "assistant",
         "content": response_text,
@@ -1440,20 +1410,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 tool_results = []  # Store all tool results
 for tool_call in tool_calls:
     # Handle both object and dict access patterns
-    if isinstance(tool_call, dict):
-        is_ollama = self._is_ollama_provider()
-        function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
-    else:
-        # Handle object-style tool calls
-        try:
-            function_name = tool_call.function.name
-            arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
-            tool_call_id = tool_call.id
-        except (json.JSONDecodeError, AttributeError) as e:
-            logging.error(f"Error parsing object-style tool call: {e}")
-            function_name = "unknown_function"
-            arguments = {}
-            tool_call_id = f"tool_{id(tool_call)}"
+    is_ollama = self._is_ollama_provider()
+    function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)

     tool_result = await execute_tool_fn(function_name, arguments)
     tool_results.append(tool_result)  # Store the result
@@ -1899,6 +1857,90 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         return params

+    def _prepare_response_logging(self, temperature: float, stream: bool, verbose: bool, markdown: bool, **kwargs) -> Optional[Dict[str, Any]]:
+        """Prepare debug logging information for response methods"""
+        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
+            debug_info = {
+                "model": self.model,
+                "timeout": self.timeout,
+                "temperature": temperature,
+                "top_p": self.top_p,
+                "n": self.n,
+                "max_tokens": self.max_tokens,
+                "presence_penalty": self.presence_penalty,
+                "frequency_penalty": self.frequency_penalty,
+                "stream": stream,
+                "verbose": verbose,
+                "markdown": markdown,
+                "kwargs": str(kwargs)
+            }
+            return debug_info
+        return None
+
+    def _process_streaming_chunk(self, chunk) -> Optional[str]:
+        """Extract content from a streaming chunk"""
+        if chunk and chunk.choices and chunk.choices[0].delta.content:
+            return chunk.choices[0].delta.content
+        return None
+
+    def _process_tool_calls_from_stream(self, delta, tool_calls: List[Dict]) -> List[Dict]:
+        """Process tool calls from streaming delta chunks.
+
+        This handles the accumulation of tool call data from streaming chunks,
+        building up the complete tool call information incrementally.
+        """
+        if hasattr(delta, 'tool_calls') and delta.tool_calls:
+            for tc in delta.tool_calls:
+                if tc.index >= len(tool_calls):
+                    tool_calls.append({
+                        "id": tc.id,
+                        "type": "function",
+                        "function": {"name": "", "arguments": ""}
+                    })
+                if tc.function.name:
+                    tool_calls[tc.index]["function"]["name"] = tc.function.name
+                if tc.function.arguments:
+                    tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+        return tool_calls
+
+    def _serialize_tool_calls(self, tool_calls) -> List[Dict]:
+        """Convert tool calls to a serializable format for all providers."""
+        serializable_tool_calls = []
+        for tc in tool_calls:
+            if isinstance(tc, dict):
+                serializable_tool_calls.append(tc)  # Already a dict
+            else:
+                # Convert object to dict
+                serializable_tool_calls.append({
+                    "id": tc.id,
+                    "type": getattr(tc, 'type', "function"),
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments
+                    }
+                })
+        return serializable_tool_calls
+
+    def _extract_tool_call_info(self, tool_call, is_ollama: bool = False) -> tuple:
+        """Extract function name, arguments, and tool_call_id from a tool call.
+
+        Handles both dict and object formats for tool calls.
+        """
+        if isinstance(tool_call, dict):
+            return self._parse_tool_call_arguments(tool_call, is_ollama)
+        else:
+            # Handle object-style tool calls
+            try:
+                function_name = tool_call.function.name
+                arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                tool_call_id = tool_call.id
+            except (json.JSONDecodeError, AttributeError) as e:
+                logging.error(f"Error parsing object-style tool call: {e}")
+                function_name = "unknown_function"
+                arguments = {}
+                tool_call_id = f"tool_{id(tool_call)}"
+            return function_name, arguments, tool_call_id
+
     # Response without tool calls
     def response(
         self,
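The new `_serialize_tool_calls` and `_extract_tool_call_info` helpers consolidate the dict-vs-object handling that was previously duplicated across the sync and async paths above. A self-contained sketch of the two tool-call shapes they normalize (SimpleNamespace stands in for the litellm/OpenAI response objects):

# Sketch: the two tool-call shapes the helpers normalize.
import json
from types import SimpleNamespace

# Dict-style, as accumulated by _process_tool_calls_from_stream:
dict_call = {"id": "call_1", "type": "function",
             "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}

# Object-style, as returned by non-streaming responses:
obj_call = SimpleNamespace(
    id="call_2", type="function",
    function=SimpleNamespace(name="get_weather", arguments='{"city": "Tokyo"}'))

for call in (dict_call, obj_call):
    if isinstance(call, dict):
        name, args = call["function"]["name"], json.loads(call["function"]["arguments"])
    else:
        name = call.function.name
        args = json.loads(call.function.arguments) if call.function.arguments else {}
    print(name, args)  # get_weather {'city': 'Paris'} / get_weather {'city': 'Tokyo'}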
@@ -1946,42 +1988,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         )

         # Get response from LiteLLM
+        response_text = ""
+        completion_params = self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=stream,
+            **kwargs
+        )
+
         if stream:
-            response_text = ""
             if verbose:
                 with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    for chunk in litellm.completion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            temperature=temperature,
-                            stream=True,
-                            **kwargs
-                        )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
+                    for chunk in litellm.completion(**completion_params):
+                        content = self._process_streaming_chunk(chunk)
+                        if content:
                             response_text += content
                             live.update(display_generating(response_text, start_time))
             else:
-                for chunk in litellm.completion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True,
-                        **kwargs
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                for chunk in litellm.completion(**completion_params):
+                    content = self._process_streaming_chunk(chunk)
+                    if content:
+                        response_text += content
         else:
-            response = litellm.completion(
-                **self._build_completion_params(
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False,
-                    **kwargs
-                )
-            )
+            response = litellm.completion(**completion_params)
             response_text = response.choices[0].message.content.strip()

         if verbose:
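This hunk is the core of the llm.py cleanup: `completion_params` is built once with the caller's `stream` flag, and the branches then differ only in how the result is consumed. A minimal standalone sketch of the pattern, assuming litellm's public API (`completion(..., stream=True)` yields chunks; `stream=False` returns a full response):

# Sketch of the "build params once, branch on consumption" pattern.
import litellm

def get_response_text(messages, model="gpt-4o-mini", stream=True, **kwargs):
    params = dict(model=model, messages=messages, stream=stream, **kwargs)
    if stream:
        text = ""
        for chunk in litellm.completion(**params):
            delta = chunk.choices[0].delta
            if delta and delta.content:
                text += delta.content
        return text.strip()
    # Non-streaming: a single complete response object
    return litellm.completion(**params).choices[0].message.content.strip()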
@@ -2022,6 +2051,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         logger.debug("Using asynchronous response function")

+
         # Log all self values when in debug mode
         self._log_llm_config(
             'Async response method',
@@ -2046,42 +2076,29 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         )

         # Get response from LiteLLM
+        response_text = ""
+        completion_params = self._build_completion_params(
+            messages=messages,
+            temperature=temperature,
+            stream=stream,
+            **kwargs
+        )
+
         if stream:
-            response_text = ""
             if verbose:
                 with Live(display_generating("", start_time), console=console or self.console, refresh_per_second=4) as live:
-                    async for chunk in await litellm.acompletion(
-                        **self._build_completion_params(
-                            messages=messages,
-                            temperature=temperature,
-                            stream=True,
-                            **kwargs
-                        )
-                    ):
-                        if chunk and chunk.choices and chunk.choices[0].delta.content:
-                            content = chunk.choices[0].delta.content
+                    async for chunk in await litellm.acompletion(**completion_params):
+                        content = self._process_streaming_chunk(chunk)
+                        if content:
                             response_text += content
                             live.update(display_generating(response_text, start_time))
             else:
-                async for chunk in await litellm.acompletion(
-                    **self._build_completion_params(
-                        messages=messages,
-                        temperature=temperature,
-                        stream=True,
-                        **kwargs
-                    )
-                ):
-                    if chunk and chunk.choices and chunk.choices[0].delta.content:
-                        response_text += chunk.choices[0].delta.content
+                async for chunk in await litellm.acompletion(**completion_params):
+                    content = self._process_streaming_chunk(chunk)
+                    if content:
+                        response_text += content
         else:
-            response = await litellm.acompletion(
-                **self._build_completion_params(
-                    messages=messages,
-                    temperature=temperature,
-                    stream=False,
-                    **kwargs
-                )
-            )
+            response = await litellm.acompletion(**completion_params)
             response_text = response.choices[0].message.content.strip()

         if verbose:
praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.120
+Version: 0.0.121
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "praisonaiagents"
-version = "0.0.120"
+version = "0.0.121"
 description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
 requires-python = ">=3.10"
 authors = [