praisonaiagents 0.0.69__py3-none-any.whl → 0.0.70__py3-none-any.whl

This diff compares publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions as they appear in their respective public registries.
praisonaiagents/llm/llm.py
@@ -460,9 +460,28 @@ class LLM:
         for tool_call in tool_calls:
             # Handle both object and dict access patterns
             if isinstance(tool_call, dict):
-                function_name = tool_call["function"]["name"]
-                arguments = json.loads(tool_call["function"]["arguments"])
-                tool_call_id = tool_call["id"]
+                # Special handling for Ollama provider which may have a different structure
+                if self.model and self.model.startswith("ollama/"):
+                    try:
+                        # Try standard format first
+                        if "function" in tool_call and isinstance(tool_call["function"], dict):
+                            function_name = tool_call["function"]["name"]
+                            arguments = json.loads(tool_call["function"]["arguments"])
+                        else:
+                            # Try alternative format that Ollama might return
+                            function_name = tool_call.get("name", "unknown_function")
+                            arguments = json.loads(tool_call.get("arguments", "{}"))
+                        tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                    except Exception as e:
+                        logging.error(f"Error processing Ollama tool call: {e}")
+                        function_name = "unknown_function"
+                        arguments = {}
+                        tool_call_id = f"tool_{id(tool_call)}"
+                else:
+                    # Standard format for other providers
+                    function_name = tool_call["function"]["name"]
+                    arguments = json.loads(tool_call["function"]["arguments"])
+                    tool_call_id = tool_call["id"]
             else:
                 function_name = tool_call.function.name
                 arguments = json.loads(tool_call.function.arguments)
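Both llm.py hunks in this release address the same parsing problem: when a tool call arrives as a plain dict, Ollama does not always nest it under a "function" key the way OpenAI-compatible providers do. The new branch tries the standard shape first, falls back to a flat {"name", "arguments"} shape, and substitutes placeholder values instead of raising when neither parses. Here is a standalone sketch of that normalization; the helper name is illustrative and not part of the package API:

```python
import json
import logging

def parse_tool_call(tool_call: dict, is_ollama: bool) -> tuple:
    """Return (function_name, arguments, tool_call_id) from either the
    OpenAI-style nested dict or the flatter variant Ollama may emit."""
    if is_ollama:
        try:
            if isinstance(tool_call.get("function"), dict):
                # Standard shape: {"function": {"name": ..., "arguments": "..."}, "id": ...}
                name = tool_call["function"]["name"]
                args = json.loads(tool_call["function"]["arguments"])
            else:
                # Flat shape: {"name": ..., "arguments": "..."}
                name = tool_call.get("name", "unknown_function")
                args = json.loads(tool_call.get("arguments", "{}"))
            return name, args, tool_call.get("id", f"tool_{id(tool_call)}")
        except Exception as exc:
            # Degrade to placeholders rather than aborting the tool loop
            logging.error(f"Error processing Ollama tool call: {exc}")
            return "unknown_function", {}, f"tool_{id(tool_call)}"
    # Other providers: assume the standard shape and let errors surface
    return (
        tool_call["function"]["name"],
        json.loads(tool_call["function"]["arguments"]),
        tool_call["id"],
    )
```

Note that a fallback ID derived from id(tool_call) is only unique within one process, which matches the behavior the diff introduces.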
@@ -490,8 +509,55 @@ class LLM:
                 "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
             })

+        # Special handling for Ollama models that don't automatically process tool results
+        if self.model and self.model.startswith("ollama/") and tool_result:
+            # For Ollama models, we need to explicitly ask the model to process the tool results
+            # First, check if the response is just a JSON tool call
+            try:
+                # If the response_text is a valid JSON that looks like a tool call,
+                # we need to make a follow-up call to process the results
+                json_response = json.loads(response_text.strip())
+                if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                    logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
+
+                    # Create a prompt that asks the model to process the tool results
+                    follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
+
+                    # Make a follow-up call to process the results
+                    follow_up_messages = [
+                        {"role": "user", "content": follow_up_prompt}
+                    ]
+
+                    # Get response with streaming
+                    if verbose:
+                        with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                            response_text = ""
+                            for chunk in litellm.completion(
+                                model=self.model,
+                                messages=follow_up_messages,
+                                temperature=temperature,
+                                stream=True
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    response_text += content
+                                    live.update(display_generating(response_text, start_time))
+                    else:
+                        response_text = ""
+                        for chunk in litellm.completion(
+                            model=self.model,
+                            messages=follow_up_messages,
+                            temperature=temperature,
+                            stream=True
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                response_text += chunk.choices[0].delta.content
+            except (json.JSONDecodeError, KeyError):
+                # Not a JSON response or not a tool call format, continue normally
+                pass
+
         # If reasoning_steps is True, do a single non-streaming call
-        if reasoning_steps:
+        elif reasoning_steps:
             resp = litellm.completion(
                 model=self.model,
                 messages=messages,
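This hunk works around Ollama models that answer a tool round-trip with the raw tool-call JSON instead of prose: if response_text parses as JSON containing a "name" or "function" key and none of the keywords "summary", "option", "result", "found", the code feeds the tool output back as a fresh user message and streams a second completion. (Note the shipped follow_up_prompt is hardcoded to an apartment-search scenario regardless of which tool actually ran.) Below is a condensed sketch of the detect-and-retry pattern, assuming litellm is installed; the helper names are illustrative and the prompt wording is generalized away from the hardcoded text:

```python
import json
import litellm

def is_bare_tool_call(response_text: str) -> bool:
    """True when the model echoed only tool-call JSON instead of prose."""
    try:
        parsed = json.loads(response_text.strip())
    except json.JSONDecodeError:
        return False
    looks_like_call = isinstance(parsed, dict) and ("name" in parsed or "function" in parsed)
    has_prose = any(w in response_text.lower() for w in ("summary", "option", "result", "found"))
    return looks_like_call and not has_prose

def summarize_tool_result(model: str, tool_result, temperature: float = 0.7) -> str:
    """Follow-up call asking the model to turn raw tool output into an answer."""
    prompt = (
        "Here are the tool results. Please analyze them and provide a "
        f"summary of the best options:\n\n{json.dumps(tool_result, indent=2)}"
    )
    text = ""
    # Stream the second completion and accumulate the deltas
    for chunk in litellm.completion(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=temperature,
        stream=True,
    ):
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            text += chunk.choices[0].delta.content
    return text
```

The keyword heuristic is deliberately loose: any of those four words appearing in prose suppresses the retry, so a model that answers normally is never called twice.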
@@ -969,9 +1035,28 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         for tool_call in tool_calls:
             # Handle both object and dict access patterns
             if isinstance(tool_call, dict):
-                function_name = tool_call["function"]["name"]
-                arguments = json.loads(tool_call["function"]["arguments"])
-                tool_call_id = tool_call["id"]
+                # Special handling for Ollama provider which may have a different structure
+                if self.model and self.model.startswith("ollama/"):
+                    try:
+                        # Try standard format first
+                        if "function" in tool_call and isinstance(tool_call["function"], dict):
+                            function_name = tool_call["function"]["name"]
+                            arguments = json.loads(tool_call["function"]["arguments"])
+                        else:
+                            # Try alternative format that Ollama might return
+                            function_name = tool_call.get("name", "unknown_function")
+                            arguments = json.loads(tool_call.get("arguments", "{}"))
+                        tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                    except Exception as e:
+                        logging.error(f"Error processing Ollama tool call: {e}")
+                        function_name = "unknown_function"
+                        arguments = {}
+                        tool_call_id = f"tool_{id(tool_call)}"
+                else:
+                    # Standard format for other providers
+                    function_name = tool_call["function"]["name"]
+                    arguments = json.loads(tool_call["function"]["arguments"])
+                    tool_call_id = tool_call["id"]
             else:
                 function_name = tool_call.function.name
                 arguments = json.loads(tool_call.function.arguments)
@@ -994,7 +1079,56 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.

         # Get response after tool calls
         response_text = ""
-        if reasoning_steps:
+
+        # Special handling for Ollama models that don't automatically process tool results
+        if self.model and self.model.startswith("ollama/") and tool_result:
+            # For Ollama models, we need to explicitly ask the model to process the tool results
+            # First, check if the response is just a JSON tool call
+            try:
+                # If the response_text is a valid JSON that looks like a tool call,
+                # we need to make a follow-up call to process the results
+                json_response = json.loads(response_text.strip())
+                if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                    logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
+
+                    # Create a prompt that asks the model to process the tool results
+                    follow_up_prompt = f"I've searched for apartments and found these results. Please analyze them and provide a summary of the best options:\n\n{json.dumps(tool_result, indent=2)}\n\nPlease format your response as a nice summary with the top options."
+
+                    # Make a follow-up call to process the results
+                    follow_up_messages = [
+                        {"role": "user", "content": follow_up_prompt}
+                    ]
+
+                    # Get response with streaming
+                    if verbose:
+                        response_text = ""
+                        async for chunk in await litellm.acompletion(
+                            model=self.model,
+                            messages=follow_up_messages,
+                            temperature=temperature,
+                            stream=True
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                content = chunk.choices[0].delta.content
+                                response_text += content
+                                print("\033[K", end="\r")
+                                print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+                    else:
+                        response_text = ""
+                        async for chunk in await litellm.acompletion(
+                            model=self.model,
+                            messages=follow_up_messages,
+                            temperature=temperature,
+                            stream=True
+                        ):
+                            if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                response_text += chunk.choices[0].delta.content
+            except (json.JSONDecodeError, KeyError):
+                # Not a JSON response or not a tool call format, continue normally
+                pass
+
+        # If no special handling was needed or if it's not an Ollama model
+        elif reasoning_steps:
             # Non-streaming call to capture reasoning
             resp = await litellm.acompletion(
                 model=self.model,
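The async path repeats the same detection logic but streams through litellm.acompletion, which with stream=True must be awaited before it can be iterated with async for, and it reports progress with carriage-return prints rather than a rich Live display. A minimal sketch of just that streaming pattern, with illustrative names:

```python
import asyncio
import litellm

async def stream_completion(model: str, messages: list, temperature: float = 0.7) -> str:
    """Collect a streamed async litellm completion into one string."""
    # acompletion(..., stream=True) must be awaited first; the awaited
    # result is an async iterator of OpenAI-style delta chunks.
    stream = await litellm.acompletion(
        model=model,
        messages=messages,
        temperature=temperature,
        stream=True,
    )
    text = ""
    async for chunk in stream:
        if chunk and chunk.choices and chunk.choices[0].delta.content:
            text += chunk.choices[0].delta.content
    return text

# Example (assumes a local Ollama model is being served):
# asyncio.run(stream_completion("ollama/llama3", [{"role": "user", "content": "hi"}]))
```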
praisonaiagents-0.0.70.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.69
+Version: 0.0.70
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
praisonaiagents-0.0.70.dist-info/RECORD
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=9UhKoTcInXfCFFmQfBEzs3G0t4dJ4yoEpQ5eUG9VC0Q,76574
+praisonaiagents/llm/llm.py,sha256=tOdTbssYSBe-o0mA03Ocq_nJPisDZyD1K71qtzCoBRA,87065
 praisonaiagents/mcp/__init__.py,sha256=IkYdrAK1bDQDm_0t3Wjt63Zwv3_IJgqz84Wqz9GH2iQ,111
 praisonaiagents/mcp/mcp.py,sha256=BPPf5AIPXx28PaJJqOg6T3NRyymQH9YAD-Km7Ma9-KA,13681
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
@@ -39,7 +39,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.69.dist-info/METADATA,sha256=ymaq8EM2AZeNlbT-ISjYgHXMHbnHpX53O_Hkcrb5KtA,856
-praisonaiagents-0.0.69.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-praisonaiagents-0.0.69.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.69.dist-info/RECORD,,
+praisonaiagents-0.0.70.dist-info/METADATA,sha256=tHQAMvxoSDYguBJ3YDzjkHhHU1vUV6EzdPZ9btlk_Lo,856
+praisonaiagents-0.0.70.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+praisonaiagents-0.0.70.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.70.dist-info/RECORD,,
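For reference, each RECORD row is path,hash,size, where the hash is the hashlib algorithm name, an equals sign, and the urlsafe-base64 digest with trailing padding stripped, per the wheel format (PEP 427). A short snippet to recompute an entry, e.g. to verify the changed llm.py hash above against an unpacked wheel:

```python
import base64
import hashlib

def record_hash(path: str) -> str:
    """Compute a wheel RECORD hash entry for one file."""
    with open(path, "rb") as f:
        digest = hashlib.sha256(f.read()).digest()
    # Urlsafe base64 without '=' padding, as RECORD stores it
    return "sha256=" + base64.urlsafe_b64encode(digest).rstrip(b"=").decode()

# For the unpacked 0.0.70 wheel, record_hash("praisonaiagents/llm/llm.py")
# should return "sha256=tOdTbssYSBe-o0mA03Ocq_nJPisDZyD1K71qtzCoBRA".
```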