praisonaiagents 0.0.111__py3-none-any.whl → 0.0.113__py3-none-any.whl

This diff compares the content of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
praisonaiagents/llm/llm.py
@@ -311,6 +311,118 @@ class LLM:
             # This ensures we make a single non-streaming call rather than risk
             # missing tool calls or making duplicate calls
             return False
+
+    def _build_messages(self, prompt, system_prompt=None, chat_history=None, output_json=None, output_pydantic=None):
+        """Build messages list for LLM completion. Works for both sync and async.
+
+        Args:
+            prompt: The user prompt (str or list)
+            system_prompt: Optional system prompt
+            chat_history: Optional list of previous messages
+            output_json: Optional Pydantic model for JSON output
+            output_pydantic: Optional Pydantic model for JSON output (alias)
+
+        Returns:
+            tuple: (messages list, original prompt)
+        """
+        messages = []
+
+        # Handle system prompt
+        if system_prompt:
+            # Append JSON schema if needed
+            if output_json:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
+            elif output_pydantic:
+                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
+
+            # Skip system messages for legacy o1 models as they don't support them
+            if not self._needs_system_message_skip():
+                messages.append({"role": "system", "content": system_prompt})
+
+        # Add chat history if provided
+        if chat_history:
+            messages.extend(chat_history)
+
+        # Handle prompt modifications for JSON output
+        original_prompt = prompt
+        if output_json or output_pydantic:
+            if isinstance(prompt, str):
+                prompt = prompt + "\nReturn ONLY a valid JSON object. No other text or explanation."
+            elif isinstance(prompt, list):
+                # Create a copy to avoid modifying the original
+                prompt = prompt.copy()
+                for item in prompt:
+                    if item.get("type") == "text":
+                        item["text"] = item["text"] + "\nReturn ONLY a valid JSON object. No other text or explanation."
+                        break
+
+        # Add prompt to messages
+        if isinstance(prompt, list):
+            messages.append({"role": "user", "content": prompt})
+        else:
+            messages.append({"role": "user", "content": prompt})
+
+        return messages, original_prompt
+
+    def _format_tools_for_litellm(self, tools: Optional[List[Any]]) -> Optional[List[Dict]]:
+        """Format tools for LiteLLM - handles all tool formats.
+
+        Supports:
+        - Pre-formatted OpenAI tools (dicts with type='function')
+        - Lists of pre-formatted tools
+        - Callable functions
+        - String function names
+
+        Args:
+            tools: List of tools in various formats
+
+        Returns:
+            List of formatted tools or None
+        """
+        if not tools:
+            return None
+
+        formatted_tools = []
+        for tool in tools:
+            # Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
+            if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
+                # Validate nested dictionary structure before accessing
+                if 'function' in tool and isinstance(tool['function'], dict) and 'name' in tool['function']:
+                    logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
+                    formatted_tools.append(tool)
+                else:
+                    logging.debug(f"Skipping malformed OpenAI tool: missing function or name")
+            # Handle lists of tools (e.g. from MCP.to_openai_tool())
+            elif isinstance(tool, list):
+                for subtool in tool:
+                    if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
+                        # Validate nested dictionary structure before accessing
+                        if 'function' in subtool and isinstance(subtool['function'], dict) and 'name' in subtool['function']:
+                            logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
+                            formatted_tools.append(subtool)
+                        else:
+                            logging.debug(f"Skipping malformed OpenAI tool in list: missing function or name")
+            elif callable(tool):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            elif isinstance(tool, str):
+                tool_def = self._generate_tool_definition(tool)
+                if tool_def:
+                    formatted_tools.append(tool_def)
+            else:
+                logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
+
+        # Validate JSON serialization before returning
+        if formatted_tools:
+            try:
+                import json
+                json.dumps(formatted_tools)  # Validate serialization
+            except (TypeError, ValueError) as e:
+                logging.error(f"Tools are not JSON serializable: {e}")
+                return None
+
+        return formatted_tools if formatted_tools else None
 
     def get_response(
         self,
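The two helpers above consolidate logic that was previously duplicated across the sync and async paths. To make the change concrete, here is a minimal sketch of what `_build_messages` returns, assuming an `LLM` instance named `llm` and an illustrative Pydantic model `Answer` (neither name is from the diff):

    from pydantic import BaseModel

    class Answer(BaseModel):
        text: str

    # Hypothetical call mirroring the helper's new signature
    messages, original_prompt = llm._build_messages(
        prompt="What is 2 + 2?",
        system_prompt="You are a terse assistant.",
        chat_history=[{"role": "user", "content": "hi"},
                      {"role": "assistant", "content": "hello"}],
        output_pydantic=Answer,
    )
    # messages: a system message with the Answer JSON schema appended,
    # the two history messages, then a user message ending in
    # "Return ONLY a valid JSON object. No other text or explanation."
    # original_prompt: the unmodified "What is 2 + 2?"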
@@ -393,64 +505,16 @@ class LLM:
         litellm.set_verbose = False
 
         # Format tools if provided
-        formatted_tools = None
-        if tools:
-            formatted_tools = []
-            for tool in tools:
-                # Check if the tool is already in OpenAI format (e.g. from MCP.to_openai_tool())
-                if isinstance(tool, dict) and 'type' in tool and tool['type'] == 'function':
-                    logging.debug(f"Using pre-formatted OpenAI tool: {tool['function']['name']}")
-                    formatted_tools.append(tool)
-                # Handle lists of tools (e.g. from MCP.to_openai_tool())
-                elif isinstance(tool, list):
-                    for subtool in tool:
-                        if isinstance(subtool, dict) and 'type' in subtool and subtool['type'] == 'function':
-                            logging.debug(f"Using pre-formatted OpenAI tool from list: {subtool['function']['name']}")
-                            formatted_tools.append(subtool)
-                elif callable(tool):
-                    tool_def = self._generate_tool_definition(tool.__name__)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                elif isinstance(tool, str):
-                    tool_def = self._generate_tool_definition(tool)
-                    if tool_def:
-                        formatted_tools.append(tool_def)
-                else:
-                    logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
-
-            if not formatted_tools:
-                formatted_tools = None
-
-        # Build messages list
-        messages = []
-        if system_prompt:
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
+        formatted_tools = self._format_tools_for_litellm(tools)
 
-        if chat_history:
-            messages.extend(chat_history)
-
-        # Handle prompt modifications for JSON output
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper
+        messages, original_prompt = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
 
         start_time = time.time()
         reflection_count = 0
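Beyond removing duplication, the swap changes two details: callables are now passed whole to `_generate_tool_definition` instead of as `tool.__name__`, and pre-formatted dicts are structure-checked before `tool['function']['name']` is read. A sketch of the input shapes the shared helper accepts (the function and tool names are invented for illustration):

    def get_weather(city: str) -> str:
        """Return a short weather summary for a city."""
        return f"Sunny in {city}"

    # Already in OpenAI function-tool format, e.g. from MCP.to_openai_tool()
    pre_formatted = {
        "type": "function",
        "function": {
            "name": "get_time",
            "description": "Get the current time",
            "parameters": {"type": "object", "properties": {}, "required": []},
        },
    }

    # A callable, a pre-formatted dict, a list of dicts, and a name string
    # are all valid entries; anything else is skipped with a debug log.
    tools = [get_weather, pre_formatted, [pre_formatted], "get_weather"]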
@@ -1160,108 +1224,20 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         reasoning_steps = kwargs.pop('reasoning_steps', self.reasoning_steps)
         litellm.set_verbose = False
 
-        # Build messages list
-        messages = []
-        if system_prompt:
-            if output_json:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_json.model_json_schema())}"
-            elif output_pydantic:
-                system_prompt += f"\nReturn ONLY a JSON object that matches this Pydantic model: {json.dumps(output_pydantic.model_json_schema())}"
-            # Skip system messages for legacy o1 models as they don't support them
-            if not self._needs_system_message_skip():
-                messages.append({"role": "system", "content": system_prompt})
-
-        if chat_history:
-            messages.extend(chat_history)
-
-        # Handle prompt modifications for JSON output
-        original_prompt = prompt
-        if output_json or output_pydantic:
-            if isinstance(prompt, str):
-                prompt += "\nReturn ONLY a valid JSON object. No other text or explanation."
-            elif isinstance(prompt, list):
-                for item in prompt:
-                    if item["type"] == "text":
-                        item["text"] += "\nReturn ONLY a valid JSON object. No other text or explanation."
-                        break
-
-        # Add prompt to messages
-        if isinstance(prompt, list):
-            messages.append({"role": "user", "content": prompt})
-        else:
-            messages.append({"role": "user", "content": prompt})
+        # Build messages list using shared helper
+        messages, original_prompt = self._build_messages(
+            prompt=prompt,
+            system_prompt=system_prompt,
+            chat_history=chat_history,
+            output_json=output_json,
+            output_pydantic=output_pydantic
+        )
 
         start_time = time.time()
         reflection_count = 0
 
-        # Format tools for LiteLLM
-        formatted_tools = None
-        if tools:
-            logging.debug(f"Starting tool formatting for {len(tools)} tools")
-            formatted_tools = []
-            for tool in tools:
-                logging.debug(f"Processing tool: {tool.__name__ if hasattr(tool, '__name__') else str(tool)}")
-                if hasattr(tool, '__name__'):
-                    tool_name = tool.__name__
-                    tool_doc = tool.__doc__ or "No description available"
-                    # Get function signature
-                    import inspect
-                    sig = inspect.signature(tool)
-                    logging.debug(f"Tool signature: {sig}")
-                    params = {}
-                    required = []
-                    for name, param in sig.parameters.items():
-                        logging.debug(f"Processing parameter: {name} with annotation: {param.annotation}")
-                        param_type = "string"
-                        if param.annotation != inspect.Parameter.empty:
-                            if param.annotation == int:
-                                param_type = "integer"
-                            elif param.annotation == float:
-                                param_type = "number"
-                            elif param.annotation == bool:
-                                param_type = "boolean"
-                            elif param.annotation == Dict:
-                                param_type = "object"
-                            elif param.annotation == List:
-                                param_type = "array"
-                            elif hasattr(param.annotation, "__name__"):
-                                param_type = param.annotation.__name__.lower()
-                        params[name] = {"type": param_type}
-                        if param.default == inspect.Parameter.empty:
-                            required.append(name)
-
-                    logging.debug(f"Generated parameters: {params}")
-                    logging.debug(f"Required parameters: {required}")
-
-                    tool_def = {
-                        "type": "function",
-                        "function": {
-                            "name": tool_name,
-                            "description": tool_doc,
-                            "parameters": {
-                                "type": "object",
-                                "properties": params,
-                                "required": required
-                            }
-                        }
-                    }
-                    # Ensure tool definition is JSON serializable
-                    try:
-                        json.dumps(tool_def)  # Test serialization
-                        logging.debug(f"Generated tool definition: {tool_def}")
-                        formatted_tools.append(tool_def)
-                    except TypeError as e:
-                        logging.error(f"Tool definition not JSON serializable: {e}")
-                        continue
-
-        # Validate final tools list
-        if formatted_tools:
-            try:
-                json.dumps(formatted_tools)  # Final serialization check
-                logging.debug(f"Final formatted tools: {json.dumps(formatted_tools, indent=2)}")
-            except TypeError as e:
-                logging.error(f"Final tools list not JSON serializable: {e}")
-                formatted_tools = None
+        # Format tools for LiteLLM using the shared helper
+        formatted_tools = self._format_tools_for_litellm(tools)
 
         response_text = ""
         if reasoning_steps:
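The inline schema builder deleted above is not lost: callables now route through `_format_tools_for_litellm` into `_generate_tool_definition` (see the final hunk). Its essence is the annotation-to-JSON-Schema type mapping spelled out in the removed lines; a self-contained sketch of that mapping, with function names of my own choosing rather than the library's:

    import inspect
    from typing import Any, Dict, List

    def annotation_to_json_type(annotation: Any) -> str:
        # Mirrors the removed mapping: unknown or missing annotations
        # default to "string".
        if annotation is inspect.Parameter.empty:
            return "string"
        mapping = {int: "integer", float: "number", bool: "boolean",
                   Dict: "object", List: "array"}
        if annotation in mapping:
            return mapping[annotation]
        if hasattr(annotation, "__name__"):
            return annotation.__name__.lower()
        return "string"

    def sketch_tool_definition(func) -> dict:
        # Builds the same OpenAI function-tool dict the removed code produced:
        # parameters without defaults are marked required.
        sig = inspect.signature(func)
        params = {name: {"type": annotation_to_json_type(p.annotation)}
                  for name, p in sig.parameters.items()}
        required = [name for name, p in sig.parameters.items()
                    if p.default is inspect.Parameter.empty]
        return {"type": "function",
                "function": {"name": func.__name__,
                             "description": func.__doc__ or "No description available",
                             "parameters": {"type": "object",
                                            "properties": params,
                                            "required": required}}}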
@@ -1901,18 +1877,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             }
             logger.debug(f"Response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
 
-            # Build messages list
-            messages = []
-            if system_prompt:
-                # Skip system messages for legacy o1 models as they don't support them
-                if not self._needs_system_message_skip():
-                    messages.append({"role": "system", "content": system_prompt})
-
-            # Add prompt to messages
-            if isinstance(prompt, list):
-                messages.append({"role": "user", "content": prompt})
-            else:
-                messages.append({"role": "user", "content": prompt})
+            # Build messages list using shared helper (simplified version without JSON output)
+            messages, _ = self._build_messages(
+                prompt=prompt,
+                system_prompt=system_prompt
+            )
 
             # Get response from LiteLLM
             if stream:
@@ -2009,18 +1978,11 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             }
             logger.debug(f"Async response method configuration: {json.dumps(debug_info, indent=2, default=str)}")
 
-            # Build messages list
-            messages = []
-            if system_prompt:
-                # Skip system messages for legacy o1 models as they don't support them
-                if not self._needs_system_message_skip():
-                    messages.append({"role": "system", "content": system_prompt})
-
-            # Add prompt to messages
-            if isinstance(prompt, list):
-                messages.append({"role": "user", "content": prompt})
-            else:
-                messages.append({"role": "user", "content": prompt})
+            # Build messages list using shared helper (simplified version without JSON output)
+            messages, _ = self._build_messages(
+                prompt=prompt,
+                system_prompt=system_prompt
+            )
 
             # Get response from LiteLLM
             if stream:
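`response` and `response_async` now make the same two-argument call, so the helper degenerates to the old inline logic: no schema suffix, no history, no prompt rewriting. A sketch of the result for a plain string prompt (values illustrative):

    messages, _ = llm._build_messages(
        prompt="Summarize the release notes.",
        system_prompt="You are a helpful assistant.",
    )
    # -> [{"role": "system", "content": "You are a helpful assistant."},
    #     {"role": "user", "content": "Summarize the release notes."}]
    # For legacy o1 models, _needs_system_message_skip() drops the system
    # message and only the user message remains.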
@@ -2076,36 +2038,44 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
             display_error(f"Error in response_async: {str(error)}")
             raise
 
-    def _generate_tool_definition(self, function_name: str) -> Optional[Dict]:
-        """Generate a tool definition from a function name."""
-        logging.debug(f"Attempting to generate tool definition for: {function_name}")
-
-        # First try to get the tool definition if it exists
-        tool_def_name = f"{function_name}_definition"
-        tool_def = globals().get(tool_def_name)
-        logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
-
-        if not tool_def:
-            import __main__
-            tool_def = getattr(__main__, tool_def_name, None)
-            logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
-
-        if tool_def:
-            logging.debug(f"Found tool definition: {tool_def}")
-            return tool_def
+    def _generate_tool_definition(self, function_or_name) -> Optional[Dict]:
+        """Generate a tool definition from a function or function name."""
+        if callable(function_or_name):
+            # Function object passed directly
+            func = function_or_name
+            function_name = func.__name__
+            logging.debug(f"Generating tool definition for callable: {function_name}")
+        else:
+            # Function name string passed
+            function_name = function_or_name
+            logging.debug(f"Attempting to generate tool definition for: {function_name}")
+
+            # First try to get the tool definition if it exists
+            tool_def_name = f"{function_name}_definition"
+            tool_def = globals().get(tool_def_name)
+            logging.debug(f"Looking for {tool_def_name} in globals: {tool_def is not None}")
+
+            if not tool_def:
+                import __main__
+                tool_def = getattr(__main__, tool_def_name, None)
+                logging.debug(f"Looking for {tool_def_name} in __main__: {tool_def is not None}")
+
+            if tool_def:
+                logging.debug(f"Found tool definition: {tool_def}")
+                return tool_def
 
-        # Try to find the function
-        func = globals().get(function_name)
-        logging.debug(f"Looking for {function_name} in globals: {func is not None}")
-
-        if not func:
-            import __main__
-            func = getattr(__main__, function_name, None)
-            logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
-
-        if not func or not callable(func):
-            logging.debug(f"Function {function_name} not found or not callable")
-            return None
+            # Try to find the function
+            func = globals().get(function_name)
+            logging.debug(f"Looking for {function_name} in globals: {func is not None}")
+
+            if not func:
+                import __main__
+                func = getattr(__main__, function_name, None)
+                logging.debug(f"Looking for {function_name} in __main__: {func is not None}")
+
+            if not func or not callable(func):
+                logging.debug(f"Function {function_name} not found or not callable")
+                return None
 
         import inspect
         # Handle Langchain and CrewAI tools
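With the widened signature, both call forms below resolve to a tool definition; the `lookup_weather` function and `llm` instance are illustrative:

    def lookup_weather(city: str) -> str:
        """Look up weather for a city."""
        return f"Cloudy in {city}"

    # New in 0.0.113: pass the callable directly
    tool_def = llm._generate_tool_definition(lookup_weather)

    # The old string form still works: it searches globals() and __main__
    # for a `lookup_weather_definition` dict, then for the function itself.
    tool_def = llm._generate_tool_definition("lookup_weather")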
praisonaiagents-0.0.111.dist-info/METADATA → praisonaiagents-0.0.113.dist-info/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.111
+Version: 0.0.113
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
praisonaiagents-0.0.111.dist-info/RECORD → praisonaiagents-0.0.113.dist-info/RECORD
@@ -16,7 +16,7 @@ praisonaiagents/knowledge/__init__.py,sha256=xL1Eh-a3xsHyIcU4foOWF-JdWYIYBALJH9b
 praisonaiagents/knowledge/chunking.py,sha256=G6wyHa7_8V0_7VpnrrUXbEmUmptlT16ISJYaxmkSgmU,7678
 praisonaiagents/knowledge/knowledge.py,sha256=OKPar-XGyAp1ndmbOOdCgqFnTCqpOThYVSIZRxZyP58,15683
 praisonaiagents/llm/__init__.py,sha256=bSywIHBHH0YUf4hSx-FmFXkRv2g1Rlhuk-gjoImE8j8,925
-praisonaiagents/llm/llm.py,sha256=e6lER7PVJsmY7ytzTG-PoJgriRjNjUP1_edaecxRLB4,113777
+praisonaiagents/llm/llm.py,sha256=z7o4tlKO0NJCqaXlnlwtPT768YjAB6tqNe_lg2KMTkk,111271
 praisonaiagents/mcp/__init__.py,sha256=ibbqe3_7XB7VrIcUcetkZiUZS1fTVvyMy_AqCSFG8qc,240
 praisonaiagents/mcp/mcp.py,sha256=-fFx4MHffnN2woLnnV7Pzx3-1SFkn2j8Gp5F5ZIwKJ0,19698
 praisonaiagents/mcp/mcp_sse.py,sha256=z8TMFhW9xuLQ7QnpOa3n1-nSHt0-Bf27qso0u4qxYSY,8357
@@ -52,7 +52,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.111.dist-info/METADATA,sha256=pmzoXOPF5cX0ua93z4KDy0Cia78iG89jdKztI6hNjvE,1669
-praisonaiagents-0.0.111.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-praisonaiagents-0.0.111.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
-praisonaiagents-0.0.111.dist-info/RECORD,,
+praisonaiagents-0.0.113.dist-info/METADATA,sha256=er3TyTx5TQPXGEtyNVBXkNnUUKo8jf7nBtv908Y7WlY,1669
+praisonaiagents-0.0.113.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.113.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.113.dist-info/RECORD,,