praisonaiagents 0.0.69__py3-none-any.whl → 0.0.71__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
@@ -460,9 +460,28 @@ class LLM:
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
-                    function_name = tool_call["function"]["name"]
-                    arguments = json.loads(tool_call["function"]["arguments"])
-                    tool_call_id = tool_call["id"]
+                    # Special handling for Ollama provider which may have a different structure
+                    if self.model and self.model.startswith("ollama/"):
+                        try:
+                            # Try standard format first
+                            if "function" in tool_call and isinstance(tool_call["function"], dict):
+                                function_name = tool_call["function"]["name"]
+                                arguments = json.loads(tool_call["function"]["arguments"])
+                            else:
+                                # Try alternative format that Ollama might return
+                                function_name = tool_call.get("name", "unknown_function")
+                                arguments = json.loads(tool_call.get("arguments", "{}"))
+                            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                        except Exception as e:
+                            logging.error(f"Error processing Ollama tool call: {e}")
+                            function_name = "unknown_function"
+                            arguments = {}
+                            tool_call_id = f"tool_{id(tool_call)}"
+                    else:
+                        # Standard format for other providers
+                        function_name = tool_call["function"]["name"]
+                        arguments = json.loads(tool_call["function"]["arguments"])
+                        tool_call_id = tool_call["id"]
                 else:
                     function_name = tool_call.function.name
                     arguments = json.loads(tool_call.function.arguments)
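
The dict branch now accepts two tool-call shapes for Ollama. A minimal sketch of that dual-format parsing, using a hypothetical standalone helper `parse_tool_call` (in the package the logic runs inline inside `LLM`, and `get_weather`/`call_1` are illustrative values):

    import json
    import logging

    def parse_tool_call(tool_call: dict, is_ollama: bool) -> tuple:
        """Hypothetical helper mirroring the branch above: returns (name, args, id)."""
        if is_ollama:
            try:
                if "function" in tool_call and isinstance(tool_call["function"], dict):
                    # Standard OpenAI-style shape: {"function": {"name": ..., "arguments": ...}}
                    name = tool_call["function"]["name"]
                    args = json.loads(tool_call["function"]["arguments"])
                else:
                    # Flat shape Ollama may return: {"name": ..., "arguments": ...}
                    name = tool_call.get("name", "unknown_function")
                    args = json.loads(tool_call.get("arguments", "{}"))
                return name, args, tool_call.get("id", f"tool_{id(tool_call)}")
            except Exception as e:
                logging.error(f"Error processing Ollama tool call: {e}")
                return "unknown_function", {}, f"tool_{id(tool_call)}"
        # Standard format for other providers
        return (tool_call["function"]["name"],
                json.loads(tool_call["function"]["arguments"]),
                tool_call["id"])

    # Both shapes parse to the same function name and arguments:
    standard = {"id": "call_1", "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
    flat = {"name": "get_weather", "arguments": '{"city": "Paris"}'}
    assert parse_tool_call(standard, is_ollama=True)[:2] == parse_tool_call(flat, is_ollama=True)[:2]
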
@@ -490,8 +509,63 @@ class LLM:
                     "content": json.dumps(tool_result) if tool_result is not None else "Function returned an empty output"
                 })
 
+            # Special handling for Ollama models that don't automatically process tool results
+            if self.model and self.model.startswith("ollama/") and tool_result:
+                # For Ollama models, we need to explicitly ask the model to process the tool results
+                # First, check if the response is just a JSON tool call
+                try:
+                    # If the response_text is a valid JSON that looks like a tool call,
+                    # we need to make a follow-up call to process the results
+                    json_response = json.loads(response_text.strip())
+                    if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                        logging.debug("Detected Ollama returning only tool call JSON, making follow-up call to process results")
+
+                        # Create a prompt that asks the model to process the tool results based on original context
+                        # Extract the original user query from messages
+                        original_query = ""
+                        for msg in messages:
+                            if msg.get("role") == "user":
+                                original_query = msg.get("content", "")
+                                break
+
+                        # Create a shorter follow-up prompt
+                        follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+
+                        # Make a follow-up call to process the results
+                        follow_up_messages = [
+                            {"role": "user", "content": follow_up_prompt}
+                        ]
+
+                        # Get response with streaming
+                        if verbose:
+                            with Live(display_generating("", start_time), console=console, refresh_per_second=4) as live:
+                                response_text = ""
+                                for chunk in litellm.completion(
+                                    model=self.model,
+                                    messages=follow_up_messages,
+                                    temperature=temperature,
+                                    stream=True
+                                ):
+                                    if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                        content = chunk.choices[0].delta.content
+                                        response_text += content
+                                        live.update(display_generating(response_text, start_time))
+                        else:
+                            response_text = ""
+                            for chunk in litellm.completion(
+                                model=self.model,
+                                messages=follow_up_messages,
+                                temperature=temperature,
+                                stream=True
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    response_text += chunk.choices[0].delta.content
+                except (json.JSONDecodeError, KeyError):
+                    # Not a JSON response or not a tool call format, continue normally
+                    pass
+
             # If reasoning_steps is True, do a single non-streaming call
-            if reasoning_steps:
+            elif reasoning_steps:
                 resp = litellm.completion(
                     model=self.model,
                     messages=messages,
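
The detection heuristic treats a response as a "bare tool-call echo" when it parses as JSON, carries a 'name' or 'function' key, and contains none of a few prose-like words; only then does the follow-up completion run. A minimal sketch of that predicate, using a hypothetical helper name (the real code evaluates the condition inline):

    import json

    def looks_like_tool_call_only(response_text: str) -> bool:
        """Hypothetical predicate mirroring the detection heuristic above."""
        try:
            parsed = json.loads(response_text.strip())
        except json.JSONDecodeError:
            return False
        has_call_keys = isinstance(parsed, dict) and ('name' in parsed or 'function' in parsed)
        has_prose_words = any(w in response_text.lower() for w in ['summary', 'option', 'result', 'found'])
        return has_call_keys and not has_prose_words

    # Bare tool-call echo -> triggers the follow-up completion
    assert looks_like_tool_call_only('{"name": "get_weather", "arguments": "{}"}')
    # Normal prose answer -> falls through to the existing elif branches
    assert not looks_like_tool_call_only("Here is a summary of what I found ...")

Note that because the new block takes over the `if`, the original `if reasoning_steps:` becomes `elif reasoning_steps:` so only one of the two paths produces the final response.
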
@@ -969,9 +1043,28 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             for tool_call in tool_calls:
                 # Handle both object and dict access patterns
                 if isinstance(tool_call, dict):
-                    function_name = tool_call["function"]["name"]
-                    arguments = json.loads(tool_call["function"]["arguments"])
-                    tool_call_id = tool_call["id"]
+                    # Special handling for Ollama provider which may have a different structure
+                    if self.model and self.model.startswith("ollama/"):
+                        try:
+                            # Try standard format first
+                            if "function" in tool_call and isinstance(tool_call["function"], dict):
+                                function_name = tool_call["function"]["name"]
+                                arguments = json.loads(tool_call["function"]["arguments"])
+                            else:
+                                # Try alternative format that Ollama might return
+                                function_name = tool_call.get("name", "unknown_function")
+                                arguments = json.loads(tool_call.get("arguments", "{}"))
+                            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+                        except Exception as e:
+                            logging.error(f"Error processing Ollama tool call: {e}")
+                            function_name = "unknown_function"
+                            arguments = {}
+                            tool_call_id = f"tool_{id(tool_call)}"
+                    else:
+                        # Standard format for other providers
+                        function_name = tool_call["function"]["name"]
+                        arguments = json.loads(tool_call["function"]["arguments"])
+                        tool_call_id = tool_call["id"]
                 else:
                     function_name = tool_call.function.name
                     arguments = json.loads(tool_call.function.arguments)
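
This async hunk mirrors the sync parsing above; its `else` branch covers SDKs that return tool calls as objects rather than dicts. A minimal sketch of that attribute-access path, using `types.SimpleNamespace` as an illustrative stand-in for such an object:

    import json
    from types import SimpleNamespace

    # Stand-in for an object-style tool call (e.g., from an OpenAI-compatible SDK)
    tool_call = SimpleNamespace(
        id="call_42",
        function=SimpleNamespace(name="get_weather", arguments='{"city": "Paris"}'),
    )

    # Mirrors the attribute-access branch above
    function_name = tool_call.function.name
    arguments = json.loads(tool_call.function.arguments)
    tool_call_id = tool_call.id
    print(function_name, arguments, tool_call_id)  # get_weather {'city': 'Paris'} call_42
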
@@ -994,7 +1087,64 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
 
             # Get response after tool calls
             response_text = ""
-            if reasoning_steps:
+
+            # Special handling for Ollama models that don't automatically process tool results
+            if self.model and self.model.startswith("ollama/") and tool_result:
+                # For Ollama models, we need to explicitly ask the model to process the tool results
+                # First, check if the response is just a JSON tool call
+                try:
+                    # If the response_text is a valid JSON that looks like a tool call,
+                    # we need to make a follow-up call to process the results
+                    json_response = json.loads(response_text.strip())
+                    if ('name' in json_response or 'function' in json_response) and not any(word in response_text.lower() for word in ['summary', 'option', 'result', 'found']):
+                        logging.debug("Detected Ollama returning only tool call JSON in async mode, making follow-up call to process results")
+
+                        # Create a prompt that asks the model to process the tool results based on original context
+                        # Extract the original user query from messages
+                        original_query = ""
+                        for msg in messages:
+                            if msg.get("role") == "user":
+                                original_query = msg.get("content", "")
+                                break
+
+                        # Create a shorter follow-up prompt
+                        follow_up_prompt = f"Results:\n{json.dumps(tool_result, indent=2)}\nProvide Answer to this Original Question based on the above results: '{original_query}'"
+
+                        # Make a follow-up call to process the results
+                        follow_up_messages = [
+                            {"role": "user", "content": follow_up_prompt}
+                        ]
+
+                        # Get response with streaming
+                        if verbose:
+                            response_text = ""
+                            async for chunk in await litellm.acompletion(
+                                model=self.model,
+                                messages=follow_up_messages,
+                                temperature=temperature,
+                                stream=True
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    content = chunk.choices[0].delta.content
+                                    response_text += content
+                                    print("\033[K", end="\r")
+                                    print(f"Processing results... {time.time() - start_time:.1f}s", end="\r")
+                        else:
+                            response_text = ""
+                            async for chunk in await litellm.acompletion(
+                                model=self.model,
+                                messages=follow_up_messages,
+                                temperature=temperature,
+                                stream=True
+                            ):
+                                if chunk and chunk.choices and chunk.choices[0].delta.content:
+                                    response_text += chunk.choices[0].delta.content
+                except (json.JSONDecodeError, KeyError):
+                    # Not a JSON response or not a tool call format, continue normally
+                    pass
+
+            # If no special handling was needed or if it's not an Ollama model
+            elif reasoning_steps:
                 # Non-streaming call to capture reasoning
                 resp = await litellm.acompletion(
                     model=self.model,
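
The async follow-up consumes the stream with `async for` over an awaited `litellm.acompletion(..., stream=True)`, the same pattern the hunk above uses. A minimal self-contained sketch of that pattern, assuming litellm is installed and an illustrative locally served `ollama/llama3` model:

    import asyncio
    import litellm

    async def stream_answer(prompt: str) -> str:
        # Awaiting acompletion(stream=True) yields an async iterator of deltas,
        # mirroring the follow-up call in the async path above
        text = ""
        async for chunk in await litellm.acompletion(
            model="ollama/llama3",  # assumed model name for illustration
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            stream=True,
        ):
            if chunk and chunk.choices and chunk.choices[0].delta.content:
                text += chunk.choices[0].delta.content
        return text

    # asyncio.run(stream_answer("Summarize the tool results above."))

In verbose mode this path prints a carriage-return progress line instead of using the Rich `Live` display that the sync path uses.
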
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.69
+Version: 0.0.71
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Dist: pydantic
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=FzoNY0q8MkvG4gADqk4JcRhmH3lcEHbRdonDgitQa30,6624
 praisonaiagents/knowledge/knowledge.py,sha256=fQNREDiwdoisfIxJBLVkteXgq_8Gbypfc3UaZbxf5QY,13210
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=9UhKoTcInXfCFFmQfBEzs3G0t4dJ4yoEpQ5eUG9VC0Q,76574
+praisonaiagents/llm/llm.py,sha256=1WjHumxzuc8sj81NQ4uVEIetUOrb-i58HYLQW7vjV3M,87921
 praisonaiagents/mcp/__init__.py,sha256=IkYdrAK1bDQDm_0t3Wjt63Zwv3_IJgqz84Wqz9GH2iQ,111
 praisonaiagents/mcp/mcp.py,sha256=BPPf5AIPXx28PaJJqOg6T3NRyymQH9YAD-Km7Ma9-KA,13681
 praisonaiagents/memory/memory.py,sha256=I8dOTkrl1i-GgQbDcrFOsSruzJ7MiI6Ys37DK27wrUs,35537
@@ -39,7 +39,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.69.dist-info/METADATA,sha256=ymaq8EM2AZeNlbT-ISjYgHXMHbnHpX53O_Hkcrb5KtA,856
-praisonaiagents-0.0.69.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-praisonaiagents-0.0.69.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.69.dist-info/RECORD,,
+praisonaiagents-0.0.71.dist-info/METADATA,sha256=JepummUjGdEF74We-x0puLxcLsYmKL_BFVePXr-6Vr8,856
+praisonaiagents-0.0.71.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
+praisonaiagents-0.0.71.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.71.dist-info/RECORD,,