praisonaiagents 0.0.96__py3-none-any.whl → 0.0.98__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -11,6 +11,8 @@ from .agents.autoagents import AutoAgents
 from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
 from .mcp.mcp import MCP
+from .session import Session
+from .guardrails import GuardrailResult, LLMGuardrail
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -40,6 +42,7 @@ __all__ = [
     'TaskOutput',
     'ReflectionOutput',
     'AutoAgents',
+    'Session',
    'display_interaction',
    'display_self_reflection',
    'display_instruction',
@@ -53,5 +56,7 @@ __all__ = [
    'async_display_callbacks',
    'Knowledge',
    'Chunking',
-    'MCP'
+    'MCP',
+    'GuardrailResult',
+    'LLMGuardrail'
 ]
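
With the `__init__.py` changes above, the new session and guardrail types are importable from the package root. A minimal sketch — only the import names are confirmed by this diff; the `Session` constructor argument is a hypothetical illustration:

    from praisonaiagents import Session, GuardrailResult, LLMGuardrail

    session = Session(session_id="my-session")  # hypothetical keyword; check the Session API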
@@ -16,7 +16,8 @@ from ..main import (
     display_self_reflection,
     ReflectionOutput,
     client,
-    adisplay_instruction
+    adisplay_instruction,
+    approval_callback
 )
 import inspect
 import uuid
@@ -570,6 +571,35 @@ Your Goal: {self.goal}
 """
         logging.debug(f"{self.name} executing tool {function_name} with arguments: {arguments}")
 
+        # Check if approval is required for this tool
+        from ..approval import is_approval_required, console_approval_callback, get_risk_level, mark_approved, ApprovalDecision
+        if is_approval_required(function_name):
+            risk_level = get_risk_level(function_name)
+            logging.info(f"Tool {function_name} requires approval (risk level: {risk_level})")
+
+            # Use global approval callback or default console callback
+            callback = approval_callback or console_approval_callback
+
+            try:
+                decision = callback(function_name, arguments, risk_level)
+                if not decision.approved:
+                    error_msg = f"Tool execution denied: {decision.reason}"
+                    logging.warning(error_msg)
+                    return {"error": error_msg, "approval_denied": True}
+
+                # Mark as approved in context to prevent double approval in decorator
+                mark_approved(function_name)
+
+                # Use modified arguments if provided
+                if decision.modified_args:
+                    arguments = decision.modified_args
+                    logging.info(f"Using modified arguments: {arguments}")
+
+            except Exception as e:
+                error_msg = f"Error during approval process: {str(e)}"
+                logging.error(error_msg)
+                return {"error": error_msg, "approval_error": True}
+
         # Special handling for MCP tools
         # Check if tools is an MCP instance with the requested function name
         from ..mcp.mcp import MCP
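
The block above calls the approval callback as `callback(function_name, arguments, risk_level)` and reads `approved`, `reason`, and `modified_args` off the returned decision. A hedged sketch of a custom callback built on those observations — the `ApprovalDecision` keyword arguments are assumptions inferred from the attributes the code reads, not a confirmed constructor signature:

    from praisonaiagents.approval import ApprovalDecision

    def my_approval_callback(function_name, arguments, risk_level):
        # Block anything flagged high-risk; approve everything else unchanged.
        if risk_level == "high":  # the set of risk-level values is an assumption
            return ApprovalDecision(approved=False, reason="high-risk tool blocked")
        return ApprovalDecision(approved=True, reason="auto-approved", modified_args=None)

Wiring this in presumably means assigning it to the global `approval_callback` imported from `..main`, which the code checks before falling back to `console_approval_callback`.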
@@ -758,69 +788,92 @@ Your Goal: {self.goal}
             )
         else:
             # Use the standard OpenAI client approach
-            if stream:
-                # Process as streaming response with formatted tools
-                final_response = self._process_stream_response(
-                    messages,
-                    temperature,
-                    start_time,
-                    formatted_tools=formatted_tools if formatted_tools else None,
-                    reasoning_steps=reasoning_steps
-                )
-            else:
-                # Process as regular non-streaming response
-                final_response = client.chat.completions.create(
-                    model=self.llm,
-                    messages=messages,
-                    temperature=temperature,
-                    tools=formatted_tools if formatted_tools else None,
-                    stream=False
-                )
+            # Continue tool execution loop until no more tool calls are needed
+            max_iterations = 10  # Prevent infinite loops
+            iteration_count = 0
+
+            while iteration_count < max_iterations:
+                if stream:
+                    # Process as streaming response with formatted tools
+                    final_response = self._process_stream_response(
+                        messages,
+                        temperature,
+                        start_time,
+                        formatted_tools=formatted_tools if formatted_tools else None,
+                        reasoning_steps=reasoning_steps
+                    )
+                else:
+                    # Process as regular non-streaming response
+                    final_response = client.chat.completions.create(
+                        model=self.llm,
+                        messages=messages,
+                        temperature=temperature,
+                        tools=formatted_tools if formatted_tools else None,
+                        stream=False
+                    )
 
-            tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
+                tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
 
-            if tool_calls:
-                messages.append({
-                    "role": "assistant",
-                    "content": final_response.choices[0].message.content,
-                    "tool_calls": tool_calls
-                })
+                if tool_calls:
+                    messages.append({
+                        "role": "assistant",
+                        "content": final_response.choices[0].message.content,
+                        "tool_calls": tool_calls
+                    })
 
-                for tool_call in tool_calls:
-                    function_name = tool_call.function.name
-                    arguments = json.loads(tool_call.function.arguments)
+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
 
-                    if self.verbose:
-                        display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+                        if self.verbose:
+                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
 
-                    tool_result = self.execute_tool(function_name, arguments)
-                    results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+                        tool_result = self.execute_tool(function_name, arguments)
+                        results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
 
-                    if self.verbose:
-                        display_tool_call(f"Function '{function_name}' returned: {results_str}")
+                        if self.verbose:
+                            display_tool_call(f"Function '{function_name}' returned: {results_str}")
 
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call.id,
-                        "content": results_str
-                    })
+                        messages.append({
+                            "role": "tool",
+                            "tool_call_id": tool_call.id,
+                            "content": results_str
+                        })
 
-                # Get final response after tool calls
-                if stream:
-                    final_response = self._process_stream_response(
-                        messages,
-                        temperature,
-                        start_time,
-                        formatted_tools=formatted_tools if formatted_tools else None,
-                        reasoning_steps=reasoning_steps
-                    )
-                else:
-                    final_response = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        stream=False
-                    )
+                    # Check if we should continue (for tools like sequential thinking)
+                    should_continue = False
+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+
+                        # For sequential thinking tool, check if nextThoughtNeeded is True
+                        if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                            should_continue = True
+                            break
+
+                    if not should_continue:
+                        # Get final response after tool calls
+                        if stream:
+                            final_response = self._process_stream_response(
+                                messages,
+                                temperature,
+                                start_time,
+                                formatted_tools=formatted_tools if formatted_tools else None,
+                                reasoning_steps=reasoning_steps
+                            )
+                        else:
+                            final_response = client.chat.completions.create(
+                                model=self.llm,
+                                messages=messages,
+                                temperature=temperature,
+                                stream=False
+                            )
+                        break
+
+                    iteration_count += 1
+                else:
+                    # No tool calls, we're done
+                    break
 
         return final_response
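
Stripped to its essentials, the new control flow is a bounded agentic loop: call the model, execute any requested tools, and only request a closing completion once no tool signals continuation. A self-contained sketch of the pattern — the function and parameter names are illustrative, not the library's API:

    import json

    def run_tool_loop(call_model, execute_tool, messages, max_iterations=10):
        response = None
        for _ in range(max_iterations):
            response = call_model(messages)
            tool_calls = getattr(response.choices[0].message, "tool_calls", None)
            if not tool_calls:
                break  # no tool calls, we're done
            messages.append({
                "role": "assistant",
                "content": response.choices[0].message.content,
                "tool_calls": tool_calls,
            })
            for tc in tool_calls:
                result = execute_tool(tc.function.name, json.loads(tc.function.arguments))
                messages.append({
                    "role": "tool",
                    "tool_call_id": tc.id,
                    "content": json.dumps(result) if result else "Function returned an empty output",
                })
        return response

The `sequentialthinking` special case above layers one more rule onto this skeleton: keep looping while the tool reports `nextThoughtNeeded`, even when a final answer could otherwise be requested.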
@@ -982,43 +1035,7 @@ Your Goal: {self.goal}
         if not response:
             return None
 
-        tool_calls = getattr(response.choices[0].message, 'tool_calls', None)
         response_text = response.choices[0].message.content.strip()
-        if tool_calls: ## TODO: Most likely this tool call is already called in _chat_completion, so maybe we can remove this.
-            messages.append({
-                "role": "assistant",
-                "content": response_text,
-                "tool_calls": tool_calls
-            })
-
-            for tool_call in tool_calls:
-                function_name = tool_call.function.name
-                arguments = json.loads(tool_call.function.arguments)
-
-                if self.verbose:
-                    display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}", console=self.console)
-
-                tool_result = self.execute_tool(function_name, arguments)
-
-                if tool_result:
-                    if self.verbose:
-                        display_tool_call(f"Function '{function_name}' returned: {tool_result}", console=self.console)
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call.id,
-                        "content": json.dumps(tool_result)
-                    })
-                else:
-                    messages.append({
-                        "role": "tool",
-                        "tool_call_id": tool_call.id,
-                        "content": "Function returned an empty output"
-                    })
-
-            response = self._chat_completion(messages, temperature=temperature, stream=stream)
-            if not response:
-                return None
-            response_text = response.choices[0].message.content.strip()
 
         # Handle output_json or output_pydantic if specified
         if output_json or output_pydantic:
@@ -1418,6 +1435,21 @@ Your Goal: {self.goal}
         """Async version of execute_tool"""
         try:
             logging.info(f"Executing async tool: {function_name} with arguments: {arguments}")
+
+            # Check if approval is required for this tool
+            from ..approval import is_approval_required, request_approval
+            if is_approval_required(function_name):
+                decision = await request_approval(function_name, arguments)
+                if not decision.approved:
+                    error_msg = f"Tool execution denied: {decision.reason}"
+                    logging.warning(error_msg)
+                    return {"error": error_msg, "approval_denied": True}
+
+                # Use modified arguments if provided
+                if decision.modified_args:
+                    arguments = decision.modified_args
+                    logging.info(f"Using modified arguments: {arguments}")
+
             # Try to find the function in the agent's tools list first
             func = None
             for tool in self.tools:
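
The async path delegates the whole decision to an awaitable `request_approval(function_name, arguments)` and honors the same `approved`/`reason`/`modified_args` fields. A hedged usage sketch of guarding an arbitrary coroutine with it — the wrapper itself is illustrative, not part of the package:

    from praisonaiagents.approval import is_approval_required, request_approval

    async def guarded_tool_call(function_name, arguments, run_tool):
        # run_tool is any async callable that executes the underlying tool.
        if is_approval_required(function_name):
            decision = await request_approval(function_name, arguments)
            if not decision.approved:
                return {"error": f"Tool execution denied: {decision.reason}", "approval_denied": True}
            arguments = decision.modified_args or arguments
        return await run_tool(function_name, arguments)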
@@ -1661,6 +1693,7 @@ Your Goal: {self.goal}
             import threading
             import time
             import inspect
+            import asyncio  # Import asyncio in the MCP scope
             # logging is already imported at the module level
 
         except ImportError as e:
@@ -63,7 +63,6 @@ def process_task_context(context_item, verbose=0, user_id=None):
     """
     Process a single context item for task execution.
     This helper function avoids code duplication between async and sync execution methods.
-
     Args:
         context_item: The context item to process (can be string, list, task object, or dict)
         verbose: Verbosity level for logging
@@ -203,7 +202,6 @@ class PraisonAIAgents:
         mem_cfg = memory_config
         if not mem_cfg:
             mem_cfg = next((t.config.get('memory_config') for t in tasks if hasattr(t, 'config') and t.config), None)
-
         # Set default memory config if none provided
         if not mem_cfg:
             mem_cfg = {
@@ -215,7 +213,6 @@ class PraisonAIAgents:
                 },
                 "rag_db_path": "./.praison/chroma_db"
             }
-
        # Add embedder config if provided
        if embedder:
            if isinstance(embedder, dict):
@@ -231,17 +228,14 @@ class PraisonAIAgents:
                 self.shared_memory = Memory(config=mem_cfg, verbose=verbose)
                 if verbose >= 5:
                     logger.info("Initialized shared memory for PraisonAIAgents")
-
                 # Distribute memory to tasks
                 for task in tasks:
                     if not task.memory:
                         task.memory = self.shared_memory
                         if verbose >= 5:
                             logger.info(f"Assigned shared memory to task {task.id}")
-
             except Exception as e:
                 logger.error(f"Failed to initialize shared memory: {e}")
-
         # Update tasks with shared memory
         if self.shared_memory:
             for task in tasks:
@@ -898,6 +892,105 @@ Context:
     def clear_state(self) -> None:
         """Clear all state values"""
         self._state.clear()
+
+    # Convenience methods for enhanced state management
+    def has_state(self, key: str) -> bool:
+        """Check if a state key exists"""
+        return key in self._state
+
+    def get_all_state(self) -> Dict[str, Any]:
+        """Get a copy of the entire state dictionary"""
+        return self._state.copy()
+
+    def delete_state(self, key: str) -> bool:
+        """Delete a state key if it exists. Returns True if deleted, False if key didn't exist."""
+        if key in self._state:
+            del self._state[key]
+            return True
+        return False
+
+    def increment_state(self, key: str, amount: float = 1, default: float = 0) -> float:
+        """Increment a numeric state value. Creates the key with default if it doesn't exist."""
+        current = self._state.get(key, default)
+        if not isinstance(current, (int, float)):
+            raise TypeError(f"Cannot increment non-numeric value at key '{key}': {type(current).__name__}")
+        new_value = current + amount
+        self._state[key] = new_value
+        return new_value
+
+    def append_to_state(self, key: str, value: Any, max_length: Optional[int] = None) -> List[Any]:
+        """Append a value to a list state. Creates the list if it doesn't exist;
+        an existing non-list value is wrapped in a list first.
+
+        Args:
+            key: State key
+            value: Value to append
+            max_length: Optional maximum length for the list
+
+        Returns:
+            The updated list
+        """
+        if key not in self._state:
+            self._state[key] = []
+        elif not isinstance(self._state[key], list):
+            # Be explicit about type conversion for better user experience
+            current_value = self._state[key]
+            self._state[key] = [current_value]
+
+        self._state[key].append(value)
+
+        # Trim list if max_length is specified
+        if max_length and len(self._state[key]) > max_length:
+            self._state[key] = self._state[key][-max_length:]
+
+        return self._state[key]
+
+    def save_session_state(self, session_id: str, include_memory: bool = True) -> None:
+        """Save current state to memory for session persistence"""
+        if self.shared_memory and include_memory:
+            state_data = {
+                "session_id": session_id,
+                "user_id": self.user_id,
+                "run_id": self.run_id,
+                "state": self._state,
+                "agents": [agent.name for agent in self.agents],
+                "process": self.process
+            }
+            self.shared_memory.store_short_term(
+                text=f"Session state for {session_id}",
+                metadata={
+                    "type": "session_state",
+                    "session_id": session_id,
+                    "user_id": self.user_id,
+                    "state_data": state_data
+                }
+            )
+
+    def restore_session_state(self, session_id: str) -> bool:
+        """Restore state from memory for session persistence. Returns True if restored."""
+        if not self.shared_memory:
+            return False
+
+        # Use metadata-based search for better SQLite compatibility
+        results = self.shared_memory.search_short_term(
+            query="type:session_state",
+            limit=10  # Get more results to filter by session_id
+        )
+
+        # Filter results by session_id in metadata
+        for result in results:
+            metadata = result.get("metadata", {})
+            if (metadata.get("type") == "session_state" and
+                    metadata.get("session_id") == session_id):
+                state_data = metadata.get("state_data", {})
+                if "state" in state_data:
+                    # Merge with existing state instead of replacing
+                    self._state.update(state_data["state"])
+                    return True
+
+        return False
 
     def launch(self, path: str = '/agents', port: int = 8000, host: str = '0.0.0.0', debug: bool = False, protocol: str = "http"):
         """