praisonaiagents 0.0.134__tar.gz → 0.0.136__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (82) hide show
  1. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/PKG-INFO +1 -2
  2. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/llm/llm.py +75 -4
  3. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/llm/openai_client.py +7 -4
  4. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/session.py +126 -4
  5. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents.egg-info/PKG-INFO +1 -2
  6. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents.egg-info/SOURCES.txt +1 -0
  7. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents.egg-info/requires.txt +0 -1
  8. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/pyproject.toml +1 -2
  9. praisonaiagents-0.0.136/tests/test_ollama_sequential_fix.py +129 -0
  10. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/README.md +0 -0
  11. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/__init__.py +0 -0
  12. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agent/__init__.py +0 -0
  13. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agent/agent.py +0 -0
  14. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agent/handoff.py +0 -0
  15. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agent/image_agent.py +0 -0
  16. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agent/router_agent.py +0 -0
  17. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agents/__init__.py +0 -0
  18. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agents/agents.py +0 -0
  19. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/agents/autoagents.py +0 -0
  20. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/approval.py +0 -0
  21. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/guardrails/__init__.py +0 -0
  22. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/guardrails/guardrail_result.py +0 -0
  23. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/guardrails/llm_guardrail.py +0 -0
  24. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/knowledge/__init__.py +0 -0
  25. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/knowledge/chunking.py +0 -0
  26. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/knowledge/knowledge.py +0 -0
  27. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/llm/__init__.py +0 -0
  28. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/llm/model_capabilities.py +0 -0
  29. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/llm/model_router.py +0 -0
  30. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/main.py +0 -0
  31. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/mcp/__init__.py +0 -0
  32. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/mcp/mcp.py +0 -0
  33. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/mcp/mcp_http_stream.py +0 -0
  34. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/mcp/mcp_sse.py +0 -0
  35. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/memory/__init__.py +0 -0
  36. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/memory/memory.py +0 -0
  37. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/process/__init__.py +0 -0
  38. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/process/process.py +0 -0
  39. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/task/__init__.py +0 -0
  40. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/task/task.py +0 -0
  41. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/telemetry/__init__.py +0 -0
  42. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/telemetry/integration.py +0 -0
  43. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/telemetry/telemetry.py +0 -0
  44. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/README.md +0 -0
  45. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/__init__.py +0 -0
  46. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/arxiv_tools.py +0 -0
  47. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/calculator_tools.py +0 -0
  48. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/csv_tools.py +0 -0
  49. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/duckdb_tools.py +0 -0
  50. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  51. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/excel_tools.py +0 -0
  52. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/file_tools.py +0 -0
  53. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/json_tools.py +0 -0
  54. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/newspaper_tools.py +0 -0
  55. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/pandas_tools.py +0 -0
  56. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/python_tools.py +0 -0
  57. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/searxng_tools.py +0 -0
  58. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/shell_tools.py +0 -0
  59. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/spider_tools.py +0 -0
  60. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/test.py +0 -0
  61. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/tools.py +0 -0
  62. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  63. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  64. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/xml_tools.py +0 -0
  65. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/yaml_tools.py +0 -0
  66. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents/tools/yfinance_tools.py +0 -0
  67. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  68. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/praisonaiagents.egg-info/top_level.txt +0 -0
  69. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/setup.cfg +0 -0
  70. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test-graph-memory.py +0 -0
  71. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test.py +0 -0
  72. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_fix_comprehensive.py +0 -0
  73. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_handoff_compatibility.py +0 -0
  74. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_http_stream_basic.py +0 -0
  75. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_llm_self_reflection_direct.py +0 -0
  76. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_ollama_async_fix.py +0 -0
  77. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_ollama_fix.py +0 -0
  78. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_posthog_fixed.py +0 -0
  79. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_self_reflection_comprehensive.py +0 -0
  80. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_self_reflection_fix_simple.py +0 -0
  81. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_self_reflection_fix_verification.py +0 -0
  82. {praisonaiagents-0.0.134 → praisonaiagents-0.0.136}/tests/test_validation_feedback.py +0 -0
@@ -1,13 +1,12 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.134
3
+ Version: 0.0.136
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
7
7
  Requires-Dist: pydantic
8
8
  Requires-Dist: rich
9
9
  Requires-Dist: openai
10
- Requires-Dist: mcp>=1.6.0
11
10
  Requires-Dist: posthog>=3.0.0
12
11
  Requires-Dist: aiohttp>=3.8.0
13
12
  Provides-Extra: mcp
@@ -2,6 +2,7 @@ import logging
2
2
  import os
3
3
  import warnings
4
4
  import re
5
+ import inspect
5
6
  from typing import Any, Dict, List, Optional, Union, Literal, Callable
6
7
  from pydantic import BaseModel
7
8
  import time
@@ -16,7 +17,6 @@ from ..main import (
16
17
  ReflectionOutput,
17
18
  execute_sync_callback,
18
19
  )
19
- from .model_capabilities import is_gemini_internal_tool
20
20
  from rich.console import Console
21
21
  from rich.live import Live
22
22
 
@@ -380,6 +380,65 @@ class LLM:
380
380
 
381
381
  return function_name, arguments, tool_call_id
382
382
 
383
+ def _validate_and_filter_ollama_arguments(self, function_name: str, arguments: Dict[str, Any], available_tools: List) -> Dict[str, Any]:
384
+ """
385
+ Validate and filter tool call arguments for Ollama provider.
386
+
387
+ Ollama sometimes generates tool calls with mixed parameters where arguments
388
+ from different functions are combined. This method validates arguments against
389
+ the actual function signature and removes invalid parameters.
390
+
391
+ Args:
392
+ function_name: Name of the function to call
393
+ arguments: Arguments provided in the tool call
394
+ available_tools: List of available tool functions
395
+
396
+ Returns:
397
+ Filtered arguments dictionary with only valid parameters
398
+ """
399
+ if not available_tools:
400
+ logging.debug(f"[OLLAMA_FIX] No available tools provided for validation")
401
+ return arguments
402
+
403
+ # Find the target function
404
+ target_function = None
405
+ for tool in available_tools:
406
+ tool_name = getattr(tool, '__name__', str(tool))
407
+ if tool_name == function_name:
408
+ target_function = tool
409
+ break
410
+
411
+ if not target_function:
412
+ logging.debug(f"[OLLAMA_FIX] Function {function_name} not found in available tools")
413
+ return arguments
414
+
415
+ try:
416
+ # Get function signature
417
+ sig = inspect.signature(target_function)
418
+ valid_params = set(sig.parameters.keys())
419
+
420
+ # Filter arguments to only include valid parameters
421
+ filtered_args = {}
422
+ invalid_params = []
423
+
424
+ for param_name, param_value in arguments.items():
425
+ if param_name in valid_params:
426
+ filtered_args[param_name] = param_value
427
+ else:
428
+ invalid_params.append(param_name)
429
+
430
+ if invalid_params:
431
+ logging.debug(f"[OLLAMA_FIX] Function {function_name} received invalid parameters: {invalid_params}")
432
+ logging.debug(f"[OLLAMA_FIX] Valid parameters for {function_name}: {list(valid_params)}")
433
+ logging.debug(f"[OLLAMA_FIX] Original arguments: {arguments}")
434
+ logging.debug(f"[OLLAMA_FIX] Filtered arguments: {filtered_args}")
435
+
436
+ return filtered_args
437
+
438
+ except Exception as e:
439
+ logging.debug(f"[OLLAMA_FIX] Error validating arguments for {function_name}: {e}")
440
+ return arguments
441
+
383
442
  def _needs_system_message_skip(self) -> bool:
384
443
  """Check if this model requires skipping system messages"""
385
444
  if not self.model:
@@ -591,10 +650,14 @@ class LLM:
591
650
  if tool_def:
592
651
  formatted_tools.append(tool_def)
593
652
  # Handle Gemini internal tools (e.g., {"googleSearch": {}}, {"urlContext": {}}, {"codeExecution": {}})
594
- elif is_gemini_internal_tool(tool):
653
+ elif isinstance(tool, dict) and len(tool) == 1:
595
654
  tool_name = next(iter(tool.keys()))
596
- logging.debug(f"Using Gemini internal tool: {tool_name}")
597
- formatted_tools.append(tool)
655
+ gemini_internal_tools = {'googleSearch', 'urlContext', 'codeExecution'}
656
+ if tool_name in gemini_internal_tools:
657
+ logging.debug(f"Using Gemini internal tool: {tool_name}")
658
+ formatted_tools.append(tool)
659
+ else:
660
+ logging.debug(f"Skipping unknown tool: {tool_name}")
598
661
  else:
599
662
  logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
600
663
 
@@ -959,6 +1022,10 @@ class LLM:
959
1022
  is_ollama = self._is_ollama_provider()
960
1023
  function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)
961
1024
 
1025
+ # Validate and filter arguments for Ollama provider
1026
+ if is_ollama and tools:
1027
+ arguments = self._validate_and_filter_ollama_arguments(function_name, arguments, tools)
1028
+
962
1029
  logging.debug(f"[TOOL_EXEC_DEBUG] About to execute tool {function_name} with args: {arguments}")
963
1030
  tool_result = execute_tool_fn(function_name, arguments)
964
1031
  logging.debug(f"[TOOL_EXEC_DEBUG] Tool execution result: {tool_result}")
@@ -1610,6 +1677,10 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
1610
1677
  is_ollama = self._is_ollama_provider()
1611
1678
  function_name, arguments, tool_call_id = self._extract_tool_call_info(tool_call, is_ollama)
1612
1679
 
1680
+ # Validate and filter arguments for Ollama provider
1681
+ if is_ollama and tools:
1682
+ arguments = self._validate_and_filter_ollama_arguments(function_name, arguments, tools)
1683
+
1613
1684
  tool_result = await execute_tool_fn(function_name, arguments)
1614
1685
  tool_results.append(tool_result) # Store the result
1615
1686
 
@@ -18,7 +18,6 @@ from dataclasses import dataclass
18
18
  from rich.console import Console
19
19
  from rich.live import Live
20
20
  import inspect
21
- from .model_capabilities import is_gemini_internal_tool
22
21
 
23
22
  # Constants
24
23
  LOCAL_SERVER_API_KEY_PLACEHOLDER = "not-needed"
@@ -407,10 +406,14 @@ class OpenAIClient:
407
406
  if tool_def:
408
407
  formatted_tools.append(tool_def)
409
408
  # Handle Gemini internal tools (e.g., {"googleSearch": {}}, {"urlContext": {}}, {"codeExecution": {}})
410
- elif is_gemini_internal_tool(tool):
409
+ elif isinstance(tool, dict) and len(tool) == 1:
411
410
  tool_name = next(iter(tool.keys()))
412
- logging.debug(f"Using Gemini internal tool: {tool_name}")
413
- formatted_tools.append(tool)
411
+ gemini_internal_tools = {'googleSearch', 'urlContext', 'codeExecution'}
412
+ if tool_name in gemini_internal_tools:
413
+ logging.debug(f"Using Gemini internal tool: {tool_name}")
414
+ formatted_tools.append(tool)
415
+ else:
416
+ logging.debug(f"Skipping unknown tool: {tool_name}")
414
417
  else:
415
418
  logging.debug(f"Skipping tool of unsupported type: {type(tool)}")
416
419
 
@@ -78,7 +78,7 @@ class Session:
78
78
  if not self.is_remote:
79
79
  default_memory_config = {
80
80
  "provider": "rag",
81
- "use_embedding": True,
81
+ "use_embedding": False, # Disable embeddings to avoid OpenAI API key requirement
82
82
  "rag_db_path": f".praison/sessions/{self.session_id}/chroma_db"
83
83
  }
84
84
  if memory_config:
@@ -96,6 +96,7 @@ class Session:
96
96
  self._memory = None
97
97
  self._knowledge = None
98
98
  self._agents_instance = None
99
+ self._agents = {} # Track agents and their chat histories
99
100
  else:
100
101
  # For remote sessions, disable local memory/knowledge
101
102
  self.memory_config = {}
@@ -103,6 +104,7 @@ class Session:
103
104
  self._memory = None
104
105
  self._knowledge = None
105
106
  self._agents_instance = None
107
+ self._agents = {} # Track agents and their chat histories
106
108
 
107
109
  @property
108
110
  def memory(self) -> Memory:
@@ -170,7 +172,23 @@ class Session:
170
172
  agent_kwargs["knowledge"] = knowledge
171
173
  agent_kwargs["knowledge_config"] = self.knowledge_config
172
174
 
173
- return Agent(**agent_kwargs)
175
+ agent = Agent(**agent_kwargs)
176
+
177
+ # Create a unique key for this agent (using name and role)
178
+ agent_key = f"{name}:{role}"
179
+
180
+ # Restore chat history if it exists from previous sessions
181
+ if agent_key in self._agents:
182
+ agent.chat_history = self._agents[agent_key].get("chat_history", [])
183
+ else:
184
+ # Try to restore from memory for backward compatibility
185
+ restored_history = self._restore_agent_chat_history(agent_key)
186
+ agent.chat_history = restored_history
187
+
188
+ # Track the agent
189
+ self._agents[agent_key] = {"agent": agent, "chat_history": agent.chat_history}
190
+
191
+ return agent
174
192
 
175
193
  # Keep create_agent for backward compatibility
176
194
  def create_agent(self, *args, **kwargs) -> Agent:
@@ -189,6 +207,11 @@ class Session:
189
207
  """
190
208
  if self.is_remote:
191
209
  raise ValueError("State operations are not available for remote agent sessions")
210
+
211
+ # Save agent chat histories first
212
+ self._save_agent_chat_histories()
213
+
214
+ # Save session state
192
215
  state_text = f"Session state: {state_data}"
193
216
  self.memory.store_short_term(
194
217
  text=state_text,
@@ -212,12 +235,15 @@ class Session:
212
235
  """
213
236
  if self.is_remote:
214
237
  raise ValueError("State operations are not available for remote agent sessions")
215
- # Use metadata-based search for better SQLite compatibility
238
+ # Use content-based search for better SQLite compatibility
216
239
  results = self.memory.search_short_term(
217
- query=f"type:session_state",
240
+ query="Session state:",
218
241
  limit=10 # Get more results to filter by session_id
219
242
  )
220
243
 
244
+ # Restore agent chat histories first
245
+ self._restore_agent_chat_histories()
246
+
221
247
  # Filter results by session_id in metadata
222
248
  for result in results:
223
249
  metadata = result.get("metadata", {})
@@ -230,6 +256,97 @@ class Session:
230
256
 
231
257
  return {}
232
258
 
259
+ def _restore_agent_chat_history(self, agent_key: str) -> List[Dict[str, Any]]:
260
+ """
261
+ Restore agent chat history from memory.
262
+
263
+ Args:
264
+ agent_key: Unique identifier for the agent
265
+
266
+ Returns:
267
+ List of chat history messages or empty list if not found
268
+ """
269
+ if self.is_remote:
270
+ return []
271
+
272
+ # Search for agent chat history in memory
273
+ results = self.memory.search_short_term(
274
+ query="Agent chat history for",
275
+ limit=10
276
+ )
277
+
278
+ # Filter results by session_id and agent_key
279
+ for result in results:
280
+ metadata = result.get("metadata", {})
281
+ if (metadata.get("type") == "agent_chat_history" and
282
+ metadata.get("session_id") == self.session_id and
283
+ metadata.get("agent_key") == agent_key):
284
+ # Extract chat history from metadata
285
+ chat_history = metadata.get("chat_history", [])
286
+ return chat_history
287
+
288
+ return []
289
+
290
+ def _restore_agent_chat_histories(self) -> None:
291
+ """
292
+ Restore all agent chat histories from memory.
293
+ """
294
+ if self.is_remote:
295
+ return
296
+
297
+ # Search for all agent chat histories in memory
298
+ results = self.memory.search_short_term(
299
+ query="Agent chat history for",
300
+ limit=50 # Get many results to find all agents
301
+ )
302
+
303
+ # Filter and restore chat histories for this session
304
+ for result in results:
305
+ metadata = result.get("metadata", {})
306
+ if (metadata.get("type") == "agent_chat_history" and
307
+ metadata.get("session_id") == self.session_id):
308
+ agent_key = metadata.get("agent_key")
309
+ chat_history = metadata.get("chat_history", [])
310
+
311
+ if agent_key and chat_history:
312
+ # Store in _agents dict for later retrieval
313
+ self._agents[agent_key] = {
314
+ "agent": None, # Will be populated when Agent is created
315
+ "chat_history": chat_history
316
+ }
317
+
318
+ def _save_agent_chat_histories(self) -> None:
319
+ """
320
+ Save all agent chat histories to memory.
321
+ """
322
+ if self.is_remote:
323
+ return
324
+
325
+ for agent_key, agent_data in self._agents.items():
326
+ agent = agent_data.get("agent")
327
+ chat_history = None
328
+
329
+ # Prioritize history from the live agent object, but fall back to restored history
330
+ if agent and hasattr(agent, 'chat_history'):
331
+ chat_history = agent.chat_history
332
+ agent_data["chat_history"] = chat_history # Ensure tracked history is up-to-date
333
+ else:
334
+ chat_history = agent_data.get("chat_history")
335
+
336
+ if chat_history is not None:
337
+ # Save to memory
338
+ history_text = f"Agent chat history for {agent_key}"
339
+ self.memory.store_short_term(
340
+ text=history_text,
341
+ metadata={
342
+ "type": "agent_chat_history",
343
+ "session_id": self.session_id,
344
+ "user_id": self.user_id,
345
+ "agent_key": agent_key,
346
+ "chat_history": chat_history
347
+ }
348
+ )
349
+
233
350
  def get_state(self, key: str, default: Any = None) -> Any:
234
351
  """Get a specific state value"""
235
352
  state = self.restore_state()
@@ -241,6 +358,11 @@ class Session:
241
358
  current_state[key] = value
242
359
  self.save_state(current_state)
243
360
 
361
def increment_state(self, key: str, increment: int = 1, default: int = 0) -> None:
    """Add *increment* to the numeric state value under *key*, starting from *default*."""
    self.set_state(key, self.get_state(key, default) + increment)
244
366
  def add_memory(self, text: str, memory_type: str = "long", **metadata) -> None:
245
367
  """
246
368
  Add information to session memory.
@@ -1,13 +1,12 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: praisonaiagents
3
- Version: 0.0.134
3
+ Version: 0.0.136
4
4
  Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
5
5
  Author: Mervin Praison
6
6
  Requires-Python: >=3.10
7
7
  Requires-Dist: pydantic
8
8
  Requires-Dist: rich
9
9
  Requires-Dist: openai
10
- Requires-Dist: mcp>=1.6.0
11
10
  Requires-Dist: posthog>=3.0.0
12
11
  Requires-Dist: aiohttp>=3.8.0
13
12
  Provides-Extra: mcp
@@ -72,6 +72,7 @@ tests/test_http_stream_basic.py
72
72
  tests/test_llm_self_reflection_direct.py
73
73
  tests/test_ollama_async_fix.py
74
74
  tests/test_ollama_fix.py
75
+ tests/test_ollama_sequential_fix.py
75
76
  tests/test_posthog_fixed.py
76
77
  tests/test_self_reflection_comprehensive.py
77
78
  tests/test_self_reflection_fix_simple.py
@@ -1,7 +1,6 @@
1
1
  pydantic
2
2
  rich
3
3
  openai
4
- mcp>=1.6.0
5
4
  posthog>=3.0.0
6
5
  aiohttp>=3.8.0
7
6
 
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
4
4
 
5
5
  [project]
6
6
  name = "praisonaiagents"
7
- version = "0.0.134"
7
+ version = "0.0.136"
8
8
  description = "Praison AI agents for completing complex tasks with Self Reflection Agents"
9
9
  requires-python = ">=3.10"
10
10
  authors = [
@@ -14,7 +14,6 @@ dependencies = [
14
14
  "pydantic",
15
15
  "rich",
16
16
  "openai",
17
- "mcp>=1.6.0",
18
17
  "posthog>=3.0.0",
19
18
  "aiohttp>=3.8.0"
20
19
  ]
@@ -0,0 +1,129 @@
1
+ #!/usr/bin/env python3
2
+ """
3
+ Test script to verify Ollama sequential tool calling argument mixing fix.
4
+
5
+ This test validates that the parameter validation and filtering fix correctly handles
6
+ the case where Ollama generates tool calls with mixed parameters from different functions.
7
+ """
8
+
9
+ import logging
10
+ from praisonaiagents.llm.llm import LLM
11
+
12
+ # Enable debug logging
13
+ logging.basicConfig(level=logging.DEBUG)
14
+
15
+ # Test functions matching the issue description
16
def get_stock_price(company_name: str) -> str:
    """
    Get the stock price of a company.

    Args:
        company_name (str): The name of the company.

    Returns:
        str: The stock price of the company.
    """
    # Fixed fake quote; the tests only care about the message shape.
    return "The stock price of " + company_name + " is 100"
28
def multiply(a: int, b: int) -> int:
    """Return the product of *a* and *b*."""
    product = a * b
    return product
34
def test_ollama_argument_validation():
    """
    Exercise the Ollama argument validation and filtering behaviour.
    """
    print("Testing Ollama argument validation and filtering...")

    model_client = LLM(model="ollama/llama3.2")
    tool_fns = [get_stock_price, multiply]

    def run_filter(fn_name, args, available):
        # Small helper so each case below reads as a single call.
        return model_client._validate_and_filter_ollama_arguments(fn_name, args, available)

    # Case 1: arguments that already match the signature survive untouched.
    print("\n1. Testing valid arguments:")
    args_ok = {"a": 100, "b": 2}
    result = run_filter("multiply", args_ok, tool_fns)
    print(f"Original: {args_ok}")
    print(f"Filtered: {result}")
    assert result == args_ok, "Valid arguments should pass through unchanged"
    print("✅ Valid arguments test passed")

    # Case 2: parameters belonging to another tool are stripped (issue #918).
    print("\n2. Testing mixed arguments (the main issue):")
    args_mixed = {"a": "get_stock_price", "company_name": "Google", "b": "2"}
    result = run_filter("multiply", args_mixed, tool_fns)
    expected = {"a": "get_stock_price", "b": "2"}  # Should remove 'company_name'
    print(f"Original: {args_mixed}")
    print(f"Filtered: {result}")
    print(f"Expected: {expected}")
    assert result == expected, f"Expected {expected}, got {result}"
    print("✅ Mixed arguments filtering test passed")

    # Case 3: nothing valid remains, so an empty dict comes back.
    print("\n3. Testing all invalid arguments:")
    args_bad = {"invalid_param1": "value1", "invalid_param2": "value2"}
    result = run_filter("multiply", args_bad, tool_fns)
    print(f"Original: {args_bad}")
    print(f"Filtered: {result}")
    assert result == {}, "All invalid arguments should be filtered out"
    print("✅ Invalid arguments filtering test passed")

    # Case 4: unknown function names leave the arguments untouched.
    print("\n4. Testing function not found:")
    args_any = {"param": "value"}
    result = run_filter("nonexistent_function", args_any, tool_fns)
    print(f"Original: {args_any}")
    print(f"Filtered: {result}")
    assert result == args_any, "Arguments should pass through if function not found"
    print("✅ Function not found test passed")

    # Case 5: with no tools there is nothing to validate against.
    print("\n5. Testing empty tools list:")
    args_any = {"param": "value"}
    result = run_filter("multiply", args_any, [])
    print(f"Original: {args_any}")
    print(f"Filtered: {result}")
    assert result == args_any, "Arguments should pass through if no tools provided"
    print("✅ Empty tools test passed")

    print("\n🎉 All Ollama argument validation tests passed!")
    return True
94
def test_provider_detection():
    """
    Verify that Ollama models are recognised and other providers are not.
    """
    print("\nTesting Ollama provider detection...")

    # A model with the ollama/ prefix must be detected as Ollama.
    assert LLM(model="ollama/llama3.2")._is_ollama_provider(), "Should detect ollama/ prefix"
    print("✅ Ollama prefix detection works")

    # An OpenAI model name must not be detected as Ollama.
    assert not LLM(model="gpt-4o-mini")._is_ollama_provider(), "Should not detect OpenAI as Ollama"
    print("✅ Non-Ollama provider detection works")

    print("✅ Provider detection tests passed!")
    return True
113
if __name__ == "__main__":
    separator = "=" * 60
    print("Running Ollama sequential tool calling fix tests...")
    print(separator)

    try:
        # Provider detection first, then the argument-filtering checks.
        test_provider_detection()
        test_ollama_argument_validation()
    except Exception as e:
        print(f"\n❌ TEST FAILED: {e}")
        import traceback
        traceback.print_exc()
    else:
        print("\n" + separator)
        print("🎉 ALL TESTS PASSED!")
        print("The Ollama sequential tool calling argument mixing issue has been fixed!")