praisonaiagents 0.0.92__py3-none-any.whl → 0.0.93__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
--- praisonaiagents/agent/agent.py
+++ praisonaiagents/agent/agent.py
@@ -864,7 +864,11 @@ Your Goal: {self.goal}
         if self._using_custom_llm:
             try:
                 # Special handling for MCP tools when using provider/model format
-                tool_param = self.tools if tools is None else tools
+                # Fix: Handle empty tools list properly - use self.tools if tools is None or empty
+                if tools is None or (isinstance(tools, list) and len(tools) == 0):
+                    tool_param = self.tools
+                else:
+                    tool_param = tools
 
                 # Convert MCP tool objects to OpenAI format if needed
                 if tool_param is not None:
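The agent.py change above closes a gap where passing `tools=[]` silently disabled the agent's own tools. A minimal standalone sketch of the behavior change (`pick_tool_param` and its inputs are illustrative, not part of the package):

```python
def pick_tool_param(agent_tools, tools):
    """Return (old_behavior, new_behavior) for the same inputs."""
    # 0.0.92: only None fell back to the agent's own tools.
    old = agent_tools if tools is None else tools
    # 0.0.93: None and an empty list both fall back.
    if tools is None or (isinstance(tools, list) and len(tools) == 0):
        new = agent_tools
    else:
        new = tools
    return old, new

print(pick_tool_param(["search"], None))      # (['search'], ['search']) - unchanged
print(pick_tool_param(["search"], []))        # ([], ['search']) - empty list now falls back
print(pick_tool_param(["search"], ["calc"]))  # (['calc'], ['calc']) - explicit tools still win
```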
--- praisonaiagents/llm/llm.py
+++ praisonaiagents/llm/llm.py
@@ -205,6 +205,72 @@ class LLM:
         }
         logging.debug(f"LLM instance initialized with: {json.dumps(debug_info, indent=2, default=str)}")
 
+    def _is_ollama_provider(self) -> bool:
+        """Detect if this is an Ollama provider regardless of naming convention"""
+        if not self.model:
+            return False
+
+        # Direct ollama/ prefix
+        if self.model.startswith("ollama/"):
+            return True
+
+        # Check environment variables for Ollama base URL
+        base_url = os.getenv("OPENAI_BASE_URL", "")
+        api_base = os.getenv("OPENAI_API_BASE", "")
+
+        # Common Ollama endpoints
+        ollama_endpoints = ["localhost:11434", "127.0.0.1:11434", ":11434"]
+
+        return any(endpoint in base_url or endpoint in api_base for endpoint in ollama_endpoints)
+
+    def _parse_tool_call_arguments(self, tool_call: Dict, is_ollama: bool = False) -> tuple:
+        """
+        Safely parse tool call arguments with proper error handling
+
+        Returns:
+            tuple: (function_name, arguments, tool_call_id)
+        """
+        try:
+            if is_ollama:
+                # Special handling for Ollama provider which may have different structure
+                if "function" in tool_call and isinstance(tool_call["function"], dict):
+                    function_name = tool_call["function"]["name"]
+                    arguments = json.loads(tool_call["function"]["arguments"])
+                else:
+                    # Try alternative format that Ollama might return
+                    function_name = tool_call.get("name", "unknown_function")
+                    arguments_str = tool_call.get("arguments", "{}")
+                    arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+            else:
+                # Standard format for other providers with error handling
+                function_name = tool_call["function"]["name"]
+                arguments_str = tool_call["function"]["arguments"]
+                arguments = json.loads(arguments_str) if arguments_str else {}
+                tool_call_id = tool_call["id"]
+
+        except (KeyError, json.JSONDecodeError, TypeError) as e:
+            logging.error(f"Error parsing tool call arguments: {e}")
+            function_name = tool_call.get("name", "unknown_function")
+            arguments = {}
+            tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
+
+        return function_name, arguments, tool_call_id
+
+    def _needs_system_message_skip(self) -> bool:
+        """Check if this model requires skipping system messages"""
+        if not self.model:
+            return False
+
+        # Only skip for specific legacy o1 models that don't support system messages
+        legacy_o1_models = [
+            "o1-preview",  # 2024-09-12 version
+            "o1-mini",  # 2024-09-12 version
+            "o1-mini-2024-09-12"  # Explicit dated version
+        ]
+
+        return self.model in legacy_o1_models
+
     def get_response(
         self,
         prompt: Union[str, List[Dict]],
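The hunk above introduces three private helpers on `LLM`. To illustrate what `_parse_tool_call_arguments` accepts, here is a condensed standalone replica; the sample payloads are assumptions about OpenAI-style and Ollama-style responses, not captured output:

```python
import json
import logging

def parse_tool_call(tool_call: dict, is_ollama: bool = False) -> tuple:
    """Condensed replica of LLM._parse_tool_call_arguments, for illustration only."""
    try:
        if is_ollama and not isinstance(tool_call.get("function"), dict):
            # Flat shape some Ollama responses use: name/arguments at top level
            function_name = tool_call.get("name", "unknown_function")
            arguments_str = tool_call.get("arguments", "{}")
        else:
            function_name = tool_call["function"]["name"]
            arguments_str = tool_call["function"]["arguments"]
        arguments = json.loads(arguments_str) if arguments_str else {}
        tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
    except (KeyError, json.JSONDecodeError, TypeError) as e:
        logging.error(f"Error parsing tool call arguments: {e}")
        function_name = tool_call.get("name", "unknown_function")
        arguments = {}
        tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
    return function_name, arguments, tool_call_id

openai_style = {"id": "call_1",
                "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'}}
ollama_style = {"name": "get_weather", "arguments": '{"city": "Paris"}'}

print(parse_tool_call(openai_style))                  # ('get_weather', {'city': 'Paris'}, 'call_1')
print(parse_tool_call(ollama_style, is_ollama=True))  # ('get_weather', {'city': 'Paris'}, 'tool_...')
```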
@@ -320,7 +386,9 @@ class LLM:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
             elif output_pydantic:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        messages.append({"role": "system", "content": system_prompt})
+        # Skip system messages for legacy o1 models as they don't support them
+        if not self._needs_system_message_skip():
+            messages.append({"role": "system", "content": system_prompt})
 
         if chat_history:
             messages.extend(chat_history)
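This hunk (and its twins later in the file) routes every system-message append through `_needs_system_message_skip()`. A sketch of the resulting behavior, with `build_messages` as a hypothetical standalone stand-in rather than package API:

```python
# Legacy o1 models rejected the "system" role, so the prompt is dropped
# for them; all other models still receive it unchanged.
LEGACY_O1_MODELS = {"o1-preview", "o1-mini", "o1-mini-2024-09-12"}

def build_messages(model: str, system_prompt: str, user_prompt: str) -> list:
    messages = []
    if system_prompt and model not in LEGACY_O1_MODELS:
        messages.append({"role": "system", "content": system_prompt})
    messages.append({"role": "user", "content": user_prompt})
    return messages

print(build_messages("gpt-4o-mini", "Be terse.", "Hi"))  # system + user
print(build_messages("o1-mini", "Be terse.", "Hi"))      # user only
```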
@@ -470,32 +538,19 @@ class LLM:
                 for tool_call in tool_calls:
                     # Handle both object and dict access patterns
                     if isinstance(tool_call, dict):
-                        # Special handling for Ollama provider which may have a different structure
-                        if self.model and self.model.startswith("ollama/"):
-                            try:
-                                # Try standard format first
-                                if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                    function_name = tool_call["function"]["name"]
-                                    arguments = json.loads(tool_call["function"]["arguments"])
-                                else:
-                                    # Try alternative format that Ollama might return
-                                    function_name = tool_call.get("name", "unknown_function")
-                                    arguments = json.loads(tool_call.get("arguments", "{}"))
-                                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                            except Exception as e:
-                                logging.error(f"Error processing Ollama tool call: {e}")
-                                function_name = "unknown_function"
-                                arguments = {}
-                                tool_call_id = f"tool_{id(tool_call)}"
-                        else:
-                            # Standard format for other providers
-                            function_name = tool_call["function"]["name"]
-                            arguments = json.loads(tool_call["function"]["arguments"])
-                            tool_call_id = tool_call["id"]
+                        is_ollama = self._is_ollama_provider()
+                        function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                     else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-                        tool_call_id = tool_call.id
+                        # Handle object-style tool calls
+                        try:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                            tool_call_id = tool_call.id
+                        except (json.JSONDecodeError, AttributeError) as e:
+                            logging.error(f"Error parsing object-style tool call: {e}")
+                            function_name = "unknown_function"
+                            arguments = {}
+                            tool_call_id = f"tool_{id(tool_call)}"
 
                     logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
                     tool_result = execute_tool_fn(function_name, arguments)
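The refactor above replaces two divergent inline parsers with the shared helper and adds a guarded path for object-style tool calls. Roughly what the dispatch now distinguishes, with error handling omitted for brevity (illustrative only; `SimpleNamespace` mocks an OpenAI-style SDK tool-call object):

```python
import json
from types import SimpleNamespace

# Two shapes a backend may return: a plain dict (some providers) or an
# object exposing .function/.id attributes (OpenAI-style SDK objects).
dict_call = {"id": "call_1",
             "function": {"name": "add", "arguments": '{"a": 1, "b": 2}'}}
obj_call = SimpleNamespace(
    id="call_2",
    function=SimpleNamespace(name="add", arguments='{"a": 3, "b": 4}'))

for tool_call in (dict_call, obj_call):
    if isinstance(tool_call, dict):
        name = tool_call["function"]["name"]
        args = json.loads(tool_call["function"]["arguments"])
    else:
        name = tool_call.function.name
        args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
    print(name, args)  # add {'a': 1, 'b': 2} / add {'a': 3, 'b': 4}
```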
@@ -867,7 +922,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
             elif output_pydantic:
                 system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-        messages.append({"role": "system", "content": system_prompt})
+        # Skip system messages for legacy o1 models as they don't support them
+        if not self._needs_system_message_skip():
+            messages.append({"role": "system", "content": system_prompt})
 
         if chat_history:
             messages.extend(chat_history)
@@ -1065,32 +1122,19 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                 for tool_call in tool_calls:
                     # Handle both object and dict access patterns
                     if isinstance(tool_call, dict):
-                        # Special handling for Ollama provider which may have a different structure
-                        if self.model and self.model.startswith("ollama/"):
-                            try:
-                                # Try standard format first
-                                if "function" in tool_call and isinstance(tool_call["function"], dict):
-                                    function_name = tool_call["function"]["name"]
-                                    arguments = json.loads(tool_call["function"]["arguments"])
-                                else:
-                                    # Try alternative format that Ollama might return
-                                    function_name = tool_call.get("name", "unknown_function")
-                                    arguments = json.loads(tool_call.get("arguments", "{}"))
-                                tool_call_id = tool_call.get("id", f"tool_{id(tool_call)}")
-                            except Exception as e:
-                                logging.error(f"Error processing Ollama tool call: {e}")
-                                function_name = "unknown_function"
-                                arguments = {}
-                                tool_call_id = f"tool_{id(tool_call)}"
-                        else:
-                            # Standard format for other providers
-                            function_name = tool_call["function"]["name"]
-                            arguments = json.loads(tool_call["function"]["arguments"])
-                            tool_call_id = tool_call["id"]
+                        is_ollama = self._is_ollama_provider()
+                        function_name, arguments, tool_call_id = self._parse_tool_call_arguments(tool_call, is_ollama)
                     else:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
-                        tool_call_id = tool_call.id
+                        # Handle object-style tool calls
+                        try:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {}
+                            tool_call_id = tool_call.id
+                        except (json.JSONDecodeError, AttributeError) as e:
+                            logging.error(f"Error parsing object-style tool call: {e}")
+                            function_name = "unknown_function"
+                            arguments = {}
+                            tool_call_id = f"tool_{id(tool_call)}"
 
                     tool_result = await execute_tool_fn(function_name, arguments)
 
@@ -1111,7 +1155,7 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             response_text = ""
 
             # Special handling for Ollama models that don't automatically process tool results
-            if self.model and self.model.startswith("ollama/") and tool_result:
+            if self._is_ollama_provider() and tool_result:
                 # For Ollama models, we need to explicitly ask the model to process the tool results
                 # First, check if the response is just a JSON tool call
                 try:
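The one-line change above is the payoff of `_is_ollama_provider()`: the Ollama-specific tool-result handling now also triggers when Ollama is reached through an OpenAI-compatible base URL rather than the `ollama/` model prefix. A standalone replica of the detection heuristic (`looks_like_ollama` is illustrative; it assumes neither env var is set beforehand):

```python
import os

def looks_like_ollama(model: str) -> bool:
    """Replica of the _is_ollama_provider heuristic, for illustration only."""
    if not model:
        return False
    if model.startswith("ollama/"):
        return True
    base_url = os.getenv("OPENAI_BASE_URL", "")
    api_base = os.getenv("OPENAI_API_BASE", "")
    return any(endpoint in base_url or endpoint in api_base
               for endpoint in ("localhost:11434", "127.0.0.1:11434", ":11434"))

os.environ["OPENAI_BASE_URL"] = "http://localhost:11434/v1"
print(looks_like_ollama("mistral"))         # True - matched via base URL
del os.environ["OPENAI_BASE_URL"]
print(looks_like_ollama("ollama/mistral"))  # True - matched via prefix
print(looks_like_ollama("gpt-4o"))          # False
```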
@@ -1517,7 +1561,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # Build messages list
         messages = []
         if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
 
         # Add prompt to messages
         if isinstance(prompt, list):
@@ -1623,7 +1669,9 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         # Build messages list
         messages = []
         if system_prompt:
-            messages.append({"role": "system", "content": system_prompt})
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
 
         # Add prompt to messages
         if isinstance(prompt, list):
--- praisonaiagents-0.0.92.dist-info/METADATA
+++ praisonaiagents-0.0.93.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.92
+Version: 0.0.93
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
--- praisonaiagents-0.0.92.dist-info/RECORD
+++ praisonaiagents-0.0.93.dist-info/RECORD
@@ -1,7 +1,7 @@
 praisonaiagents/__init__.py,sha256=Z2_rSA6mYozz0r3ioUgKzl3QV8uWRDS_QaqPg2oGjqg,1324
 praisonaiagents/main.py,sha256=D6XzpqdfglCQiWaH5LjRSv-bB3QkJso-i0h1uTFkPQI,15844
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=-zENKxcaAWH5KJOed4KmcpAeBDNtRlxqG58QHdLH6RA,86334
+praisonaiagents/agent/agent.py,sha256=DPpTgobDVZw2qlPzvNS-Xi-OF3RlM2cil6y15cbXIy8,86553
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=-cWRgok0X_4Mk-L7dW6bFdX7JVpxfe7R6aLmukktwKc,59381
@@ -10,7 +10,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=Po0JZsgjYJrXdNSggmUGOWidZEF0f8xo4nhsZZfh8tY,13217
 praisonaiagents/llm/__init__.py,sha256=ttPQQJQq6Tah-0updoEXDZFKWtJAM93rBWRoIgxRWO8,689
-praisonaiagents/llm/llm.py,sha256=Y8z7mfzL_OMhoPSIr7k7Demk8HvHmJZv80EXFY6SUEU,91863
+praisonaiagents/llm/llm.py,sha256=9wHmf0aGKf4a7YZ4JONmD7Ela8JBYVrkMFF2ei8Ivpk,93400
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-U6md6zHoJZCWF8XFq921Yy5CcSNaGqvjg3aRT737LM,16765
 praisonaiagents/mcp/mcp_sse.py,sha256=DLh3F_aoVRM1X-7hgIOWOw4FQ1nGmn9YNbQTesykzn4,6792
@@ -40,7 +40,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.92.dist-info/METADATA,sha256=nzF23q2sAFXQ4-TWyV3klRP3yyBUQyA7PwZ_8uvI6z8,1273
-praisonaiagents-0.0.92.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.92.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.92.dist-info/RECORD,,
+praisonaiagents-0.0.93.dist-info/METADATA,sha256=HlnkZm2D8lKJPxXbT6ODUIbyzhJ1LMArMStjh1vCXlY,1273
+praisonaiagents-0.0.93.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.93.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.93.dist-info/RECORD,,