praisonaiagents 0.0.117.tar.gz → 0.0.119.tar.gz

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (70)
  1. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/llm/llm.py +206 -247
  3. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/PKG-INFO +1 -1
  4. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/pyproject.toml +1 -1
  5. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/README.md +0 -0
  6. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/__init__.py +0 -0
  7. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agent/__init__.py +0 -0
  8. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agent/agent.py +0 -0
  9. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agent/handoff.py +0 -0
  10. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agent/image_agent.py +0 -0
  11. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agents/__init__.py +0 -0
  12. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agents/agents.py +0 -0
  13. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/agents/autoagents.py +0 -0
  14. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/approval.py +0 -0
  15. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/guardrails/__init__.py +0 -0
  16. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  17. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  18. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/knowledge/__init__.py +0 -0
  19. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/knowledge/chunking.py +0 -0
  20. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/knowledge/knowledge.py +0 -0
  21. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/llm/__init__.py +0 -0
  22. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/llm/openai_client.py +0 -0
  23. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/main.py +0 -0
  24. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/mcp/__init__.py +0 -0
  25. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/mcp/mcp.py +0 -0
  26. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/mcp/mcp_sse.py +0 -0
  27. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/memory/__init__.py +0 -0
  28. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/memory/memory.py +0 -0
  29. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/process/__init__.py +0 -0
  30. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/process/process.py +0 -0
  31. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/session.py +0 -0
  32. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/task/__init__.py +0 -0
  33. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/task/task.py +0 -0
  34. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/telemetry/__init__.py +0 -0
  35. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/telemetry/integration.py +0 -0
  36. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/telemetry/telemetry.py +0 -0
  37. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/README.md +0 -0
  38. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/__init__.py +0 -0
  39. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/arxiv_tools.py +0 -0
  40. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/calculator_tools.py +0 -0
  41. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/csv_tools.py +0 -0
  42. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/duckdb_tools.py +0 -0
  43. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  44. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/excel_tools.py +0 -0
  45. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/file_tools.py +0 -0
  46. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/json_tools.py +0 -0
  47. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/newspaper_tools.py +0 -0
  48. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/pandas_tools.py +0 -0
  49. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/python_tools.py +0 -0
  50. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/searxng_tools.py +0 -0
  51. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/shell_tools.py +0 -0
  52. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/spider_tools.py +0 -0
  53. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/test.py +0 -0
  54. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/tools.py +0 -0
  55. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  56. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  57. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/xml_tools.py +0 -0
  58. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/yaml_tools.py +0 -0
  59. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/tools/yfinance_tools.py +0 -0
  60. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  61. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test_handoff_compatibility.py +0 -0
  68. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test_ollama_async_fix.py +0 -0
  69. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test_ollama_fix.py +0 -0
  70. {praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/tests/test_posthog_fixed.py +0 -0
{praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.117
+ Version: 0.0.119
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
{praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents/llm/llm.py
@@ -229,6 +229,39 @@ class LLM:

  return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)

+ def _process_stream_delta(self, delta, response_text: str, tool_calls: List[Dict], formatted_tools: Optional[List] = None) -> tuple:
+ """
+ Process a streaming delta chunk to extract content and tool calls.
+
+ Args:
+ delta: The delta object from a streaming chunk
+ response_text: The accumulated response text so far
+ tool_calls: The accumulated tool calls list so far
+ formatted_tools: Optional list of formatted tools for tool call support check
+
+ Returns:
+ tuple: (updated_response_text, updated_tool_calls)
+ """
+ # Process content
+ if delta.content:
+ response_text += delta.content
+
+ # Capture tool calls from streaming chunks if provider supports it
+ if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
+ for tc in delta.tool_calls:
+ if tc.index >= len(tool_calls):
+ tool_calls.append({
+ "id": tc.id,
+ "type": "function",
+ "function": {"name": "", "arguments": ""}
+ })
+ if tc.function.name:
+ tool_calls[tc.index]["function"]["name"] = tc.function.name
+ if tc.function.arguments:
+ tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+
+ return response_text, tool_calls
+
  def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
  """
  Safely parse tool call arguments with proper error handling
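
The new _process_stream_delta helper factors the per-chunk handling that was previously duplicated across the streaming loops into one place. Below is a minimal caller-side sketch of how such a loop consumes it; the LLM construction, model name, and placeholder messages/tools are illustrative assumptions, not code taken from the package.

# Illustrative sketch only (not part of the package diff).
# Assumes chunks follow the litellm/OpenAI streaming shape (choices[0].delta)
# and that LLM(model=...) constructs an instance as used elsewhere in praisonaiagents.
import litellm
from praisonaiagents.llm import LLM

llm = LLM(model="gpt-4o-mini")                      # hypothetical instance
messages = [{"role": "user", "content": "What is 2 + 2?"}]
formatted_tools = None                              # or a list of OpenAI-style tool definitions

response_text = ""
tool_calls = []
for chunk in litellm.completion(model="gpt-4o-mini", messages=messages,
                                tools=formatted_tools, stream=True):
    if chunk and chunk.choices and chunk.choices[0].delta:
        delta = chunk.choices[0].delta
        # Accumulate streamed text and incrementally assembled tool calls.
        response_text, tool_calls = llm._process_stream_delta(
            delta, response_text, tool_calls, formatted_tools
        )
print(response_text)
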
@@ -651,23 +684,11 @@ class LLM:
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )
  if delta.content:
- response_text += delta.content
  live.update(display_generating(response_text, current_time))
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
  else:
  # Non-verbose streaming
  for chunk in litellm.completion(
@@ -681,22 +702,9 @@ class LLM:
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- if delta.content:
- response_text += delta.content
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )

  response_text = response_text.strip()

@@ -813,99 +821,56 @@ class LLM:
  # Make one more call to get the final summary response
  # Special handling for Ollama models that don't automatically process tool results
  ollama_handled = False
- if self.model and self.model.startswith("ollama/") and tool_results:
- # For Ollama models, we need to explicitly ask the model to process the tool results
- # First, check if the response is just a JSON tool call
- try:
- # If the response_text is a valid JSON that looks like a tool call,
- # we need to make a follow-up call to process the results
- json_response = json.loads(response_text.strip())
- if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
- logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
-
- # Create a prompt that asks the model to process the tool results based on original context
- # Extract the original user query from messages
- original_query = ""
- for msg in reversed(messages): # Look from the end to find the most recent user message
- if msg.get("role") == "user":
- content = msg.get("content", "")
- # Handle list content (multimodal)
- if isinstance(content, list):
- for item in content:
- if isinstance(item, dict) and item.get("type") == "text":
- original_query = item.get("text", "")
- break
- else:
- original_query = content
- if original_query:
- break
-
- # Create a shorter follow-up prompt with all tool results
- # If there's only one result, use it directly; otherwise combine them
- if len(tool_results) == 1:
- results_text = json.dumps(tool_results[0], indent=2)
- else:
- results_text = json.dumps(tool_results, indent=2)
-
- follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
- logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
- logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
- # Make a follow-up call to process the results
- follow_up_messages = [
- {"role": "user", "content": follow_up_prompt}
- ]
-
- # Get response with streaming
- if verbose:
- with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- live.update(display_generating(response_text, start_time))
- else:
- response_text = ""
- for chunk in litellm.completion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip()
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- original_prompt,
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
+ ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+ if ollama_params:
+ # Get response with streaming
+ if verbose:
+ with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+ response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
  )
-
- # Return the final response after processing Ollama's follow-up
- if final_response_text:
- return final_response_text
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
- except (json.JSONDecodeError, KeyError):
- # Not a JSON response or not a tool call format, continue normally
- pass
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ live.update(display_generating(response_text, start_time))
+ else:
+ response_text = ""
+ for chunk in litellm.completion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ ollama_params["original_prompt"],
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

  # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
  if reasoning_steps and not ollama_handled:
@@ -1340,24 +1305,12 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )
  if delta.content:
- response_text += delta.content
  print("\033[K", end="\r")
  print(f"Generating... {time.time() - start_time:.1f}s", end="\r")
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
  else:
  # Non-verbose streaming
  async for chunk in await litellm.acompletion(
@@ -1371,22 +1324,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta:
  delta = chunk.choices[0].delta
- if delta.content:
- response_text += delta.content
-
- # Capture tool calls from streaming chunks if provider supports it
- if formatted_tools and self._supports_streaming_tools() and hasattr(delta, 'tool_calls') and delta.tool_calls:
- for tc in delta.tool_calls:
- if tc.index >= len(tool_calls):
- tool_calls.append({
- "id": tc.id,
- "type": "function",
- "function": {"name": "", "arguments": ""}
- })
- if tc.function.name:
- tool_calls[tc.index]["function"]["name"] = tc.function.name
- if tc.function.arguments:
- tool_calls[tc.index]["function"]["arguments"] += tc.function.arguments
+ response_text, tool_calls = self._process_stream_delta(
+ delta, response_text, tool_calls, formatted_tools
+ )

  response_text = response_text.strip()

@@ -1480,99 +1420,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  # Special handling for Ollama models that don't automatically process tool results
  ollama_handled = False
- if self._is_ollama_provider() and tool_results:
- # For Ollama models, we need to explicitly ask the model to process the tool results
- # First, check if the response is just a JSON tool call
- try:
- # If the response_text is a valid JSON that looks like a tool call,
- # we need to make a follow-up call to process the results
- json_response = json.loads(response_text.strip())
- if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
- logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
-
- # Create a prompt that asks the model to process the tool results based on original context
- # Extract the original user query from messages
- original_query = ""
- for msg in reversed(messages): # Look from the end to find the most recent user message
- if msg.get("role") == "user":
- content = msg.get("content", "")
- # Handle list content (multimodal)
- if isinstance(content, list):
- for item in content:
- if isinstance(item, dict) and item.get("type") == "text":
- original_query = item.get("text", "")
- break
- else:
- original_query = content
- if original_query:
- break
-
- # Create a shorter follow-up prompt with all tool results
- # If there's only one result, use it directly; otherwise combine them
- if len(tool_results) == 1:
- results_text = json.dumps(tool_results[0], indent=2)
- else:
- results_text = json.dumps(tool_results, indent=2)
-
- follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
- logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
- logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
-
- # Make a follow-up call to process the results
- follow_up_messages = [
- {"role": "user", "content": follow_up_prompt}
- ]
-
- # Get response with streaming
- if verbose:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- content = chunk.choices[0].delta.content
- response_text += content
- print("\033[K", end="\r")
- print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
- else:
- response_text = ""
- async for chunk in await litellm.acompletion(
- **self._build_completion_params(
- messages=follow_up_messages,
- temperature=temperature,
- stream=stream
- )
- ):
- if chunk and chunk.choices and chunk.choices[0].delta.content:
- response_text += chunk.choices[0].delta.content
-
- # Set flag to indicate Ollama was handled
- ollama_handled = True
- final_response_text = response_text.strip()
- logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
-
- # Display the response if we got one
- if final_response_text and verbose:
- display_interaction(
- original_prompt,
- final_response_text,
- markdown=markdown,
- generation_time=time.time() - start_time,
- console=console
- )
-
- # Return the final response after processing Ollama's follow-up
- if final_response_text:
- return final_response_text
- else:
- logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
- except (json.JSONDecodeError, KeyError):
- # Not a JSON response or not a tool call format, continue normally
- pass
+ ollama_params = self._handle_ollama_model(response_text, tool_results, messages, original_prompt)
+
+ if ollama_params:
+ # Get response with streaming
+ if verbose:
+ response_text = ""
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ content = chunk.choices[0].delta.content
+ response_text += content
+ print("\033[K", end="\r")
+ print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+ else:
+ response_text = ""
+ async for chunk in await litellm.acompletion(
+ **self._build_completion_params(
+ messages=ollama_params["follow_up_messages"],
+ temperature=temperature,
+ stream=stream
+ )
+ ):
+ if chunk and chunk.choices and chunk.choices[0].delta.content:
+ response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ ollama_params["original_prompt"],
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")

  # If no special handling was needed or if it's not an Ollama model
  if reasoning_steps and not ollama_handled:
@@ -1839,6 +1736,68 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

  litellm.callbacks = events

+ def _handle_ollama_model(self, response_text: str, tool_results: List[Any], messages: List[Dict], original_prompt: Union[str, List[Dict]]) -> Optional[Dict[str, Any]]:
+ """
+ Handle special Ollama model requirements when processing tool results.
+
+ Args:
+ response_text: The initial response text from the model
+ tool_results: List of tool execution results
+ messages: The conversation messages list
+ original_prompt: The original user prompt
+
+ Returns:
+ Dict with follow-up parameters if Ollama needs special handling, None otherwise
+ """
+ if not self._is_ollama_provider() or not tool_results:
+ return None
+
+ # Check if the response is just a JSON tool call
+ try:
+ json_response = json.loads(response_text.strip())
+ if not (('name' in json_response or 'function' in json_response) and
+ not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found'])):
+ return None
+
+ logging.debug("Detected Ollama returning only tool call JSON, preparing follow-up call to process results")
+
+ # Extract the original user query from messages
+ original_query = ""
+ for msg in reversed(messages): # Look from the end to find the most recent user message
+ if msg.get("role") == "user":
+ content = msg.get("content", "")
+ # Handle list content (multimodal)
+ if isinstance(content, list):
+ for item in content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ original_query = item.get("text", "")
+ break
+ else:
+ original_query = content
+ if original_query:
+ break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+ results_text = json.dumps(tool_results[0], indent=2)
+ else:
+ results_text = json.dumps(tool_results, indent=2)
+
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")
+
+ # Return parameters for follow-up call
+ return {
+ "follow_up_messages": [{"role": "user", "content": follow_up_prompt}],
+ "original_prompt": original_prompt
+ }
+
+ except (json.JSONDecodeError, KeyError):
+ # Not a JSON response or not a tool call format
+ return None
+
  def _build_completion_params(self, **override_params) -> Dict[str, Any]:
  """Build parameters for litellm completion calls with all necessary config"""
  params = {
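
Both the synchronous and asynchronous call sites consume _handle_ollama_model the same way: when it returns follow-up parameters, they issue one more completion with ollama_params["follow_up_messages"] and treat the streamed text as the final answer. A condensed sketch of that pattern follows; llm, response_text, tool_results, messages, original_prompt, and temperature stand in for the surrounding method state and are assumptions of this sketch, not package code.

# Condensed sketch of the refactored call-site pattern (illustrative only).
ollama_params = llm._handle_ollama_model(response_text, tool_results, messages, original_prompt)
if ollama_params:
    follow_up_text = ""
    for chunk in litellm.completion(
        **llm._build_completion_params(
            messages=ollama_params["follow_up_messages"],
            temperature=temperature,
            stream=True,
        )
    ):
        # Same chunk shape as in the streaming loops above: accumulate only the text content.
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            follow_up_text += chunk.choices[0].delta.content
    final_response_text = follow_up_text.strip()
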
{praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.117
+ Version: 0.0.119
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
{praisonaiagents-0.0.117 → praisonaiagents-0.0.119}/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.117"
+ version = "0.0.119"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [