praisonaiagents 0.0.105__py3-none-any.whl → 0.0.107__py3-none-any.whl

This diff compares the contents of package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
praisonaiagents/llm/llm.py

@@ -549,6 +549,7 @@ class LLM:
  })

  should_continue = False
+ tool_results = []  # Store all tool results
  for tool_call in tool_calls:
      # Handle both object and dict access patterns
      if isinstance(tool_call, dict):
@@ -569,6 +570,7 @@ class LLM:
  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
  tool_result = execute_tool_fn(function_name, arguments)
  logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
+ tool_results.append(tool_result)  # Store the result

  if verbose:
      display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -601,7 +603,8 @@ class LLM:
  # If we reach here, no more tool calls needed - get final response
  # Make one more call to get the final summary response
  # Special handling for Ollama models that don't automatically process tool results
- if self.model and self.model.startswith("ollama/") and tool_result:
+ ollama_handled = False
+ if self.model and self.model.startswith("ollama/") and tool_results:
      # For Ollama models, we need to explicitly ask the model to process the tool results
      # First, check if the response is just a JSON tool call
      try:
@@ -614,13 +617,30 @@ class LLM:
  # Create a prompt that asks the model to process the tool results based on original context
  # Extract the original user query from messages
  original_query = ""
- for msg in messages:
+ for msg in reversed(messages):  # Look from the end to find the most recent user message
      if msg.get("role") == "user":
-         original_query = msg.get("content", "")
-         break
+         content = msg.get("content", "")
+         # Handle list content (multimodal)
+         if isinstance(content, list):
+             for item in content:
+                 if isinstance(item, dict) and item.get("type") == "text":
+                     original_query = item.get("text", "")
+                     break
+         else:
+             original_query = content
+         if original_query:
+             break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+     results_text = json.dumps(tool_results[0], indent=2)
+ else:
+     results_text = json.dumps(tool_results, indent=2)

- # Create a shorter follow-up prompt
- follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

  # Make a follow-up call to process the results
  follow_up_messages = [
@@ -653,12 +673,33 @@ class LLM:
  ):
      if chunk and chunk.choices and chunk.choices[0].delta.content:
          response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+     display_interaction(
+         original_prompt,
+         final_response_text,
+         markdown=markdown,
+         generation_time=time.time() - start_time,
+         console=console
+     )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+     return final_response_text
+ else:
+     logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
  except (json.JSONDecodeError, KeyError):
      # Not a JSON response or not a tool call format, continue normally
      pass

- # If reasoning_steps is True, do a single non-streaming call
- elif reasoning_steps:
+ # If reasoning_steps is True and we haven't handled Ollama already, do a single non-streaming call
+ if reasoning_steps and not ollama_handled:
      resp = litellm.completion(
          **self._build_completion_params(
              messages=messages,
@@ -688,8 +729,8 @@ class LLM:
      console=console
  )

- # Otherwise do the existing streaming approach
- else:
+ # Otherwise do the existing streaming approach if not already handled
+ elif not ollama_handled:
      # Get response after tool calls with streaming
      if verbose:
          with Live(display_generating("", current_time), console=console, refresh_per_second=4) as live:
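
Note: taken together, the hunks above change the synchronous tool-call path so that every tool result is collected in tool_results and, for ollama/ models, one explicit follow-up prompt is built from those results before any fallback call. The standalone sketch below is illustrative only: the function name build_ollama_follow_up and the example messages/results are assumptions, not part of llm.py; only the query-extraction and prompt-shaping logic mirrors the diff.

import json

def build_ollama_follow_up(messages, tool_results):
    """Sketch: derive the follow-up prompt the way the new code does (illustrative)."""
    # Most recent user message wins; list content (multimodal) is scanned for its text part.
    original_query = ""
    for msg in reversed(messages):
        if msg.get("role") == "user":
            content = msg.get("content", "")
            if isinstance(content, list):
                for item in content:
                    if isinstance(item, dict) and item.get("type") == "text":
                        original_query = item.get("text", "")
                        break
            else:
                original_query = content
            if original_query:
                break

    # A single result is embedded directly; multiple results are combined into one JSON list.
    results_text = json.dumps(tool_results[0] if len(tool_results) == 1 else tool_results, indent=2)
    return (f"Results:\n{results_text}\n"
            f"Provide Answer to this Original Question based on the above results: '{original_query}'")

# Hypothetical usage: two tool results and a multimodal user message.
messages = [{"role": "user", "content": [{"type": "text", "text": "What is the weather in Paris?"}]}]
print(build_ollama_follow_up(messages, [{"temp_c": 21}, {"humidity": 40}]))
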
@@ -1225,6 +1266,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
      "tool_calls": serializable_tool_calls
  })

+ tool_results = []  # Store all tool results
  for tool_call in tool_calls:
      # Handle both object and dict access patterns
      if isinstance(tool_call, dict):
@@ -1243,6 +1285,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  tool_call_id = f"tool_{id(tool_call)}"

  tool_result = await execute_tool_fn(function_name, arguments)
+ tool_results.append(tool_result)  # Store the result

  if verbose:
      display_message = f"Agent {agent_name} called function '{function_name}' with arguments: {arguments}\n"
@@ -1261,7 +1304,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  response_text = ""

  # Special handling for Ollama models that don't automatically process tool results
- if self._is_ollama_provider() and tool_result:
+ ollama_handled = False
+ if self._is_ollama_provider() and tool_results:
      # For Ollama models, we need to explicitly ask the model to process the tool results
      # First, check if the response is just a JSON tool call
      try:
@@ -1274,13 +1318,30 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  # Create a prompt that asks the model to process the tool results based on original context
  # Extract the original user query from messages
  original_query = ""
- for msg in messages:
+ for msg in reversed(messages):  # Look from the end to find the most recent user message
      if msg.get("role") == "user":
-         original_query = msg.get("content", "")
-         break
+         content = msg.get("content", "")
+         # Handle list content (multimodal)
+         if isinstance(content, list):
+             for item in content:
+                 if isinstance(item, dict) and item.get("type") == "text":
+                     original_query = item.get("text", "")
+                     break
+         else:
+             original_query = content
+         if original_query:
+             break
+
+ # Create a shorter follow-up prompt with all tool results
+ # If there's only one result, use it directly; otherwise combine them
+ if len(tool_results) == 1:
+     results_text = json.dumps(tool_results[0], indent=2)
+ else:
+     results_text = json.dumps(tool_results, indent=2)

- # Create a shorter follow-up prompt
- follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ follow_up_prompt = f"Results:\n{results_text}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+ logging.debug(f"[OLLAMA_DEBUG] Original query extracted: {original_query}")
+ logging.debug(f"[OLLAMA_DEBUG] Follow-up prompt: {follow_up_prompt[:200]}...")

  # Make a follow-up call to process the results
  follow_up_messages = [
@@ -1313,12 +1374,33 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
  ):
      if chunk and chunk.choices and chunk.choices[0].delta.content:
          response_text += chunk.choices[0].delta.content
+
+ # Set flag to indicate Ollama was handled
+ ollama_handled = True
+ final_response_text = response_text.strip()
+ logging.debug(f"[OLLAMA_DEBUG] Ollama follow-up response: {final_response_text[:200]}...")
+
+ # Display the response if we got one
+ if final_response_text and verbose:
+     display_interaction(
+         original_prompt,
+         final_response_text,
+         markdown=markdown,
+         generation_time=time.time() - start_time,
+         console=console
+     )
+
+ # Return the final response after processing Ollama's follow-up
+ if final_response_text:
+     return final_response_text
+ else:
+     logging.warning("[OLLAMA_DEBUG] Ollama follow-up returned empty response")
  except (json.JSONDecodeError, KeyError):
      # Not a JSON response or not a tool call format, continue normally
      pass

  # If no special handling was needed or if it's not an Ollama model
- elif reasoning_steps:
+ if reasoning_steps and not ollama_handled:
      # Non-streaming call to capture reasoning
      resp = await litellm.acompletion(
          **self._build_completion_params(
@@ -1348,8 +1430,8 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
      generation_time=time.time() - start_time,
      console=console
  )
- else:
-     # Get response after tool calls with streaming
+ elif not ollama_handled:
+     # Get response after tool calls with streaming if not already handled
      if verbose:
          async for chunk in await litellm.acompletion(
              **self._build_completion_params(
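
Note: in both the sync and the async paths, the previous "elif reasoning_steps: / else:" chain is replaced by checks against the new ollama_handled flag, so a successful Ollama follow-up short-circuits both fallback calls. A minimal sketch of that routing follows; choose_branch is a hypothetical helper, and the real code invokes litellm.completion / litellm.acompletion in each branch rather than returning a label.

def choose_branch(reasoning_steps: bool, ollama_handled: bool) -> str:
    # Mirrors the new guard structure after tool execution (labels only, illustrative).
    if reasoning_steps and not ollama_handled:
        return "non-streaming call to capture reasoning"
    elif not ollama_handled:
        return "default streaming call"
    return "skip: Ollama follow-up already produced the final response"

assert choose_branch(True, True) == "skip: Ollama follow-up already produced the final response"
assert choose_branch(True, False) == "non-streaming call to capture reasoning"
assert choose_branch(False, False) == "default streaming call"
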
praisonaiagents-0.0.105.dist-info/METADATA → praisonaiagents-0.0.107.dist-info/METADATA

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: praisonaiagents
- Version: 0.0.105
+ Version: 0.0.107
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
  Author: Mervin Praison
  Requires-Python: >=3.10
praisonaiagents-0.0.105.dist-info/RECORD → praisonaiagents-0.0.107.dist-info/RECORD

@@ -15,7 +15,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
  praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
  praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
  praisonaiagents/llm/__init__.py,sha256=bSywIHBHH0YUf4hSx-FmFXkRv2g1Rlhuk-gjoImE8j8,925
- praisonaiagents/llm/llm.py,sha256=4AyXTgcolemKf4kHQOwQAIE4MJ0z-CuGs9F7UkHbyfE,98385
+ praisonaiagents/llm/llm.py,sha256=JiUOobhPxs3m5Xs7pliNQOVjETqmW8vdM8HIPsZ5DfA,104417
  praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
  praisonaiagents/mcp/mcp.py,sha256=_gfp8hrSVT9aPqEDDfU8MiCdg0-3dVQpEQUE6AbrJlo,17243
  praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
@@ -51,7 +51,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
  praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
  praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
  praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
- praisonaiagents-0.0.105.dist-info/METADATA,sha256=G161N724qL4u2KTozdfECuF3m9vbR7kEFIgrPT0N_Rs,1669
- praisonaiagents-0.0.105.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- praisonaiagents-0.0.105.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
- praisonaiagents-0.0.105.dist-info/RECORD,,
+ praisonaiagents-0.0.107.dist-info/METADATA,sha256=EulBpJyK-yS6wHQx7VCOCOqk2DSlNcexjNo08mYm3MA,1669
+ praisonaiagents-0.0.107.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ praisonaiagents-0.0.107.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+ praisonaiagents-0.0.107.dist-info/RECORD,,