praisonaiagents 0.0.106__tar.gz → 0.0.107__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/llm/llm.py +101 -19
  3. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents.egg-info/PKG-INFO +1 -1
  4. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents.egg-info/SOURCES.txt +2 -0
  5. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/pyproject.toml +1 -1
  6. praisonaiagents-0.0.107/tests/test_ollama_async_fix.py +161 -0
  7. praisonaiagents-0.0.107/tests/test_ollama_fix.py +101 -0
  8. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/README.md +0 -0
  9. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/__init__.py +0 -0
  10. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agent/__init__.py +0 -0
  11. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agent/agent.py +0 -0
  12. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agent/image_agent.py +0 -0
  13. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agents/__init__.py +0 -0
  14. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agents/agents.py +0 -0
  15. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/agents/autoagents.py +0 -0
  16. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/approval.py +0 -0
  17. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/guardrails/__init__.py +0 -0
  18. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  19. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  20. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/knowledge/__init__.py +0 -0
  21. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/knowledge/chunking.py +0 -0
  22. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/knowledge/knowledge.py +0 -0
  23. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/llm/__init__.py +0 -0
  24. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/main.py +0 -0
  25. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/mcp/__init__.py +0 -0
  26. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/mcp/mcp.py +0 -0
  27. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/mcp/mcp_sse.py +0 -0
  28. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/memory/__init__.py +0 -0
  29. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/memory/memory.py +0 -0
  30. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/process/__init__.py +0 -0
  31. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/process/process.py +0 -0
  32. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/session.py +0 -0
  33. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/task/__init__.py +0 -0
  34. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/task/task.py +0 -0
  35. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/telemetry/__init__.py +0 -0
  36. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/telemetry/integration.py +0 -0
  37. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/telemetry/telemetry.py +0 -0
  38. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/README.md +0 -0
  39. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/__init__.py +0 -0
  40. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/arxiv_tools.py +0 -0
  41. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/calculator_tools.py +0 -0
  42. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/csv_tools.py +0 -0
  43. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/duckdb_tools.py +0 -0
  44. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  45. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/excel_tools.py +0 -0
  46. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/file_tools.py +0 -0
  47. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/json_tools.py +0 -0
  48. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/newspaper_tools.py +0 -0
  49. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/pandas_tools.py +0 -0
  50. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/python_tools.py +0 -0
  51. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/searxng_tools.py +0 -0
  52. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/shell_tools.py +0 -0
  53. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/spider_tools.py +0 -0
  54. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/test.py +0 -0
  55. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/tools.py +0 -0
  56. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  57. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  58. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/xml_tools.py +0 -0
  59. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/yaml_tools.py +0 -0
  60. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents/tools/yfinance_tools.py +0 -0
  61. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  62. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents.egg-info/requires.txt +0 -0
  63. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/praisonaiagents.egg-info/top_level.txt +0 -0
  64. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/setup.cfg +0 -0
  65. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/tests/test-graph-memory.py +0 -0
  66. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/tests/test.py +0 -0
  67. {praisonaiagents-0.0.106 → praisonaiagents-0.0.107}/tests/test_posthog_fixed.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.106
+ Version: 0.0.107
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents/llm/llm.py
@@ -549,6 +549,7 @@ class LLM:
  })

  should_continue = False
+ tool_results = [] # Store all tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
  if isinstance(tool_call, dict):
@@ -569,6 +570,7 @@
  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
  tool_result = execute_tool_fn(function_name, arguments)
  logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
+ tool_results.append(tool_result) # Store the result

  if verbose:
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -601,7 +603,8 @@
  # If we reach here, no more tool calls needed - get final response
  # Make one more call to get the final summary response
  # Special handling for Ollama models that don't automatically process tool results
- if self.model and self.model.startswith("ollama/") and tool_result:
+ ollama_handled = False
+ if self.model and self.model.startswith("ollama/") and tool_results:
  # For Ollama models, we need to explicitly ask the model to process the tool results
  # First, check if the response is just a JSON tool call
  try:
@@ -614,13 +617,30 @@
  # Create a prompt that asks the model to process the tool results based on original context
  # Extract the original user query from messages
  original_query = ""
- for msg in messages:
+ for msg in reversed(messages): # Look from the end to find the most recent user message
  if msg.get("role") == "user":
- original_query = msg.get("content", "")
- break
+ content = msg.get("content", "")
+ # Handle list content (multimodal)
+ if isinstance(content, list):
+ for item in content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ original_query = item.get("text", "")
+ break
+ else:
+ original_query = content
+ if original_query:
+ break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+ results_text = json.dumps(tool_results[0], indent=2)
+ else:
+ results_text = json.dumps(tool_results, indent=2)

- # Create a shorter follow-up prompt
- follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

  # Make a follow-up call to process the results
  follow_up_messages = [
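
Note: the hunks above collect every tool result into tool_results, pull the most recent user message out of the history (including multimodal list content), and fold everything into a single follow-up prompt. A condensed, self-contained sketch of that pattern follows; the helper names are hypothetical, since in llm.py the logic is written inline.

    import json
    from typing import Any, Dict, List

    def extract_latest_user_query(messages: List[Dict[str, Any]]) -> str:
        """Return the text of the most recent user message, handling multimodal list content."""
        for msg in reversed(messages):
            if msg.get("role") != "user":
                continue
            content = msg.get("content", "")
            if isinstance(content, list):
                # Multimodal message: pick the first non-empty text part, if any.
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text" and item.get("text"):
                        return item["text"]
            elif content:
                return content
        return ""

    def build_follow_up_prompt(tool_results: List[Any], original_query: str) -> str:
        """Serialize one or all tool results and ask the model to answer the original question."""
        payload = tool_results[0] if len(tool_results) == 1 else tool_results
        results_text = json.dumps(payload, indent=2)
        return (
            f"Results:\n{results_text}\n"
            f"Provide Answer to this Original Question based on the above results: '{original_query}'"
        )

    # Example with toy data:
    messages = [{"role": "user", "content": "What is Python?"}]
    print(build_follow_up_prompt([{"total": 2}], extract_latest_user_query(messages)))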
@@ -653,12 +673,33 @@
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ original_prompt,
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
  except (json.JSONDecodeError, KeyError):
  # Not a JSON response or not a tool call format, continue normally
  pass

- # If reasoning_steps is True, do a single non-streaming call
- elif reasoning_steps:
+ # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+ if reasoning_steps and not ollama_handled:
  resp = litellm.completion(
  **self._build_completion_params(
  messages=messages,
@@ -688,8 +729,8 @@
  console=console
  )

- # Otherwise do the existing streaming approach
- else:
+ # Otherwise do the existing streaming approach if not already handled
+ elif not ollama_handled:
  # Get response after tool calls with streaming
  if verbose:
  with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
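
Note: the ollama_handled flag introduced above turns the old if/elif chain into three mutually exclusive outcomes after tool execution: an Ollama follow-up that returns its answer directly, a non-streaming reasoning call, or the default streaming call. A small standalone sketch of that gating, with hypothetical callables standing in for the real LLM calls:

    from typing import Callable, List

    def finalize_after_tools(
        model: str,
        tool_results: List[dict],
        reasoning_steps: bool,
        ollama_follow_up: Callable[[List[dict]], str],
        reasoning_call: Callable[[], str],
        streaming_call: Callable[[], str],
    ) -> str:
        """Mirror the new gating: a successful Ollama follow-up returns immediately,
        and even an empty follow-up prevents the other branches from re-running."""
        ollama_handled = False
        if model.startswith("ollama/") and tool_results:
            final_response_text = ollama_follow_up(tool_results).strip()
            ollama_handled = True
            if final_response_text:
                return final_response_text
        if reasoning_steps and not ollama_handled:
            return reasoning_call()
        elif not ollama_handled:
            return streaming_call()
        return ""

    # With an Ollama model and tool results, the streaming branch is never reached:
    print(finalize_after_tools(
        "ollama/llama3.2", [{"total": 2}], False,
        lambda results: "answer built from tool results",
        lambda: "reasoning response",
        lambda: "streamed response",
    ))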
@@ -1225,6 +1266,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  "tool_calls": serializable_tool_calls
  })

+ tool_results = [] # Store all tool results
  for tool_call in tool_calls:
  # Handle both object and dict access patterns
  if isinstance(tool_call, dict):
@@ -1243,6 +1285,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  tool_call_id = f"tool_{id(tool_call)}"

  tool_result = await execute_tool_fn(function_name, arguments)
+ tool_results.append(tool_result) # Store the result

  if verbose:
  display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1261,7 +1304,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text = ""

  # Special handling for Ollama models that don't automatically process tool results
- if self._is_ollama_provider() and tool_result:
+ ollama_handled = False
+ if self._is_ollama_provider() and tool_results:
  # For Ollama models, we need to explicitly ask the model to process the tool results
  # First, check if the response is just a JSON tool call
  try:
@@ -1274,13 +1318,30 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Create a prompt that asks the model to process the tool results based on original context
  # Extract the original user query from messages
  original_query = ""
- for msg in messages:
+ for msg in reversed(messages): # Look from the end to find the most recent user message
  if msg.get("role") == "user":
- original_query = msg.get("content", "")
- break
+ content = msg.get("content", "")
+ # Handle list content (multimodal)
+ if isinstance(content, list):
+ for item in content:
+ if isinstance(item, dict) and item.get("type") == "text":
+ original_query = item.get("text", "")
+ break
+ else:
+ original_query = content
+ if original_query:
+ break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+ results_text = json.dumps(tool_results[0], indent=2)
+ else:
+ results_text = json.dumps(tool_results, indent=2)

- # Create a shorter follow-up prompt
- follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

  # Make a follow-up call to process the results
  follow_up_messages = [
@@ -1313,12 +1374,33 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
  if chunk and chunk.choices and chunk.choices[0].delta.content:
  response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+ display_interaction(
+ original_prompt,
+ final_response_text,
+ markdown=markdown,
+ generation_time=time.time() - start_time,
+ console=console
+ )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+ return final_response_text
+ else:
+ logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
  except (json.JSONDecodeError, KeyError):
  # Not a JSON response or not a tool call format, continue normally
  pass

  # If no special handling was needed or if it's not an Ollama model
- elif reasoning_steps:
+ if reasoning_steps and not ollama_handled:
  # Non-streaming call to capture reasoning
  resp = await litellm.acompletion(
  **self._build_completion_params(
@@ -1348,8 +1430,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  generation_time=time.time() - start_time,
  console=console
  )
- else:
- # Get response after tool calls with streaming
+ elif not ollama_handled:
+ # Get response after tool calls with streaming if not already handled
  if verbose:
  async for chunk in await litellm.acompletion(
  **self._build_completion_params(
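
Note: the async hunks above mirror the sync path using litellm.acompletion. A simplified, self-contained sketch of the follow-up call they make is below; it assumes a local Ollama model and a single user message, whereas the real code builds its parameters via self._build_completion_params and its own follow_up_messages list.

    import asyncio
    import litellm

    async def ollama_follow_up(follow_up_prompt: str, model: str = "ollama/llama3.2") -> str:
        # Stream the follow-up completion and concatenate the delta chunks,
        # following the async-for pattern shown in the diff above.
        response_text = ""
        async for chunk in await litellm.acompletion(
            model=model,
            messages=[{"role": "user", "content": follow_up_prompt}],
            stream=True,
        ):
            if chunk and chunk.choices and chunk.choices[0].delta.content:
                response_text += chunk.choices[0].delta.content
        return response_text.strip()

    # Example (requires a running Ollama server):
    # print(asyncio.run(ollama_follow_up("Results:\n{...}\nProvide Answer to this Original Question based on the above results: 'What is Python?'")))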

praisonaiagents.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.106
+ Version: 0.0.107
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents.egg-info/SOURCES.txt
@@ -60,4 +60,6 @@ praisonaiagents/tools/yfinance_tools.py
  praisonaiagents/tools/train/data/generatecot.py
  tests/test-graph-memory.py
  tests/test.py
+ tests/test_ollama_async_fix.py
+ tests/test_ollama_fix.py
  tests/test_posthog_fixed.py
pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "praisonaiagents"
- version = "0.0.106"
+ version = "0.0.107"
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
  requires-python = ">=3.10"
  authors = [
tests/test_ollama_async_fix.py (new file)
@@ -0,0 +1,161 @@
+ #!/usr/bin/env python3
+ """
+ Test script to verify Ollama empty response fix for both sync and async methods.
+ """
+
+ import asyncio
+ import logging
+ from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
+ from typing import Dict, Any
+
+ # Enable debug logging
+ logging.basicConfig(level=logging.DEBUG)
+
+ # Define a simple tool for testing
+ def search_tool(query: str) -> Dict[str, Any]:
+ """Simulate a search tool that returns results."""
+ logging.debug(f"[TEST] Search tool called with query: {query}")
+ results = {
+ "results": [
+ {"title": "Result 1", "description": f"Information about {query}"},
+ {"title": "Result 2", "description": f"More details on {query}"}
+ ],
+ "total": 2
+ }
+ logging.debug(f"[TEST] Search tool returning: {results}")
+ return results
+
+ # Test with both sync and async workflows
+ async def test_model_async(model_name: str):
+ print(f"\n{'='*60}")
+ print(f"Testing ASYNC with model: {model_name}")
+ print('='*60)
+
+ # Create agent with the search tool
+ agent = Agent(
+ name="SearchAgent",
+ role="Information Researcher",
+ goal="Search for information and provide helpful answers",
+ backstory="You are an expert at finding and summarizing information.",
+ llm=model_name,
+ tools=[search_tool],
+ verbose=True
+ )
+
+ # Create a task that requires tool usage
+ task = Task(
+ name="search_task",
+ description="Search for information about 'Python programming' and provide a summary of what you found.",
+ expected_output="A clear summary of the search results",
+ agent=agent
+ )
+
+ # Create and run the workflow
+ workflow = PraisonAIAgents(
+ agents=[agent],
+ tasks=[task],
+ verbose=True
+ )
+
+ try:
+ result = await workflow.astart() # Use async start
+ print(f"\n{'='*60}")
+ print(f"ASYNC RESULT for {model_name}:")
+ print('='*60)
+ print(result)
+
+ # Check if result is empty
+ if not result or (isinstance(result, dict) and not result.get('task_results', {}).get('search_task', {}).get('output')):
+ print(f"\n❌ ERROR: Empty response from {model_name} (async)")
+ return False
+ else:
+ print(f"\n✅ SUCCESS: Got valid response from {model_name} (async)")
+ return True
+
+ except Exception as e:
+ print(f"\n❌ ERROR with {model_name} (async): {e}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+ def test_model_sync(model_name: str):
+ print(f"\n{'='*60}")
+ print(f"Testing SYNC with model: {model_name}")
+ print('='*60)
+
+ # Create agent with the search tool
+ agent = Agent(
+ name="SearchAgent",
+ role="Information Researcher",
+ goal="Search for information and provide helpful answers",
+ backstory="You are an expert at finding and summarizing information.",
+ llm=model_name,
+ tools=[search_tool],
+ verbose=True
+ )
+
+ # Create a task that requires tool usage
+ task = Task(
+ name="search_task",
+ description="Search for information about 'Python programming' and provide a summary of what you found.",
+ expected_output="A clear summary of the search results",
+ agent=agent
+ )
+
+ # Create and run the workflow
+ workflow = PraisonAIAgents(
+ agents=[agent],
+ tasks=[task],
+ verbose=True
+ )
+
+ try:
+ result = workflow.start() # Use sync start
+ print(f"\n{'='*60}")
+ print(f"SYNC RESULT for {model_name}:")
+ print('='*60)
+ print(result)
+
+ # Check if result is empty
+ if not result or (isinstance(result, dict) and not result.get('task_results', {}).get('search_task', {}).get('output')):
+ print(f"\n❌ ERROR: Empty response from {model_name} (sync)")
+ return False
+ else:
+ print(f"\n✅ SUCCESS: Got valid response from {model_name} (sync)")
+ return True
+
+ except Exception as e:
+ print(f"\n❌ ERROR with {model_name} (sync): {e}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+ async def main():
+ print("Testing Ollama empty response fix for both sync and async...")
+
+ # Test sync methods
+ print("\n1. Testing SYNC methods:")
+ openai_sync_success = test_model_sync("openai/gpt-4o-mini")
+ ollama_sync_success = test_model_sync("ollama/llama3.2")
+
+ # Test async methods
+ print("\n2. Testing ASYNC methods:")
+ openai_async_success = await test_model_async("openai/gpt-4o-mini")
+ ollama_async_success = await test_model_async("ollama/llama3.2")
+
+ # Summary
+ print(f"\n{'='*60}")
+ print("TEST SUMMARY:")
+ print('='*60)
+ print(f"OpenAI sync test: {'✅ PASSED' if openai_sync_success else '❌ FAILED'}")
+ print(f"Ollama sync test: {'✅ PASSED' if ollama_sync_success else '❌ FAILED'}")
+ print(f"OpenAI async test: {'✅ PASSED' if openai_async_success else '❌ FAILED'}")
+ print(f"Ollama async test: {'✅ PASSED' if ollama_async_success else '❌ FAILED'}")
+
+ if ollama_sync_success and ollama_async_success:
+ print("\n🎉 Ollama empty response issue has been fixed for both sync and async!")
+ else:
+ print("\n⚠️ Ollama empty response issue still exists in some cases.")
+
+ if __name__ == "__main__":
+ asyncio.run(main())
tests/test_ollama_fix.py (new file)
@@ -0,0 +1,101 @@
+ #!/usr/bin/env python3
+ """
+ Test script to verify Ollama empty response fix.
+ """
+
+ import logging
+ from praisonaiagents import Agent, Task, PraisonAIAgents, TaskOutput
+ from typing import Dict, Any
+
+ # Enable debug logging
+ logging.basicConfig(level=logging.DEBUG)
+
+ # Define a simple tool for testing
+ def search_tool(query: str) -> Dict[str, Any]:
+ """Simulate a search tool that returns results."""
+ logging.debug(f"[TEST] Search tool called with query: {query}")
+ results = {
+ "results": [
+ {"title": "Result 1", "description": f"Information about {query}"},
+ {"title": "Result 2", "description": f"More details on {query}"}
+ ],
+ "total": 2
+ }
+ logging.debug(f"[TEST] Search tool returning: {results}")
+ return results
+
+ # Test with both OpenAI and Ollama models
+ def test_model(model_name: str):
+ print(f"\n{'='*60}")
+ print(f"Testing with model: {model_name}")
+ print('='*60)
+
+ # Create agent with the search tool
+ agent = Agent(
+ name="SearchAgent",
+ role="Information Researcher",
+ goal="Search for information and provide helpful answers",
+ backstory="You are an expert at finding and summarizing information.",
+ llm=model_name,
+ tools=[search_tool],
+ verbose=True
+ )
+
+ # Create a task that requires tool usage
+ task = Task(
+ name="search_task",
+ description="Search for information about 'Python programming' and provide a summary of what you found.",
+ expected_output="A clear summary of the search results",
+ agent=agent
+ )
+
+ # Create and run the workflow
+ workflow = PraisonAIAgents(
+ agents=[agent],
+ tasks=[task],
+ verbose=True
+ )
+
+ try:
+ result = workflow.start()
+ print(f"\n{'='*60}")
+ print(f"FINAL RESULT for {model_name}:")
+ print('='*60)
+ print(result)
+
+ # Check if result is empty
+ if not result or (isinstance(result, dict) and not result.get('task_results', {}).get('search_task', {}).get('output')):
+ print(f"\n❌ ERROR: Empty response from {model_name}")
+ return False
+ else:
+ print(f"\n✅ SUCCESS: Got valid response from {model_name}")
+ return True
+
+ except Exception as e:
+ print(f"\n❌ ERROR with {model_name}: {e}")
+ import traceback
+ traceback.print_exc()
+ return False
+
+ if __name__ == "__main__":
+ print("Testing Ollama empty response fix...")
+
+ # Test with OpenAI first (as baseline)
+ print("\n1. Testing with OpenAI (baseline):")
+ openai_success = test_model("openai/gpt-4o-mini")
+
+ # Test with Ollama
+ print("\n2. Testing with Ollama:")
+ ollama_success = test_model("ollama/llama3.2")
+
+ # Summary
+ print(f"\n{'='*60}")
+ print("TEST SUMMARY:")
+ print('='*60)
+ print(f"OpenAI test: {'✅ PASSED' if openai_success else '❌ FAILED'}")
+ print(f"Ollama test: {'✅ PASSED' if ollama_success else '❌ FAILED'}")
+
+ if ollama_success:
+ print("\n🎉 Ollama empty response issue has been fixed!")
+ else:
+ print("\n⚠️ Ollama empty response issue still exists.")