praisonaiagents 0.0.97__tar.gz → 0.0.99__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/PKG-INFO +1 -1
  2. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/__init__.py +4 -1
  3. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agent/agent.py +283 -66
  4. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/llm/llm.py +248 -148
  5. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/task/task.py +121 -3
  6. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents.egg-info/PKG-INFO +1 -1
  7. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/pyproject.toml +1 -1
  8. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/README.md +0 -0
  9. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agent/__init__.py +0 -0
  10. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agent/image_agent.py +0 -0
  11. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agents/__init__.py +0 -0
  12. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agents/agents.py +0 -0
  13. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agents/autoagents.py +0 -0
  14. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/approval.py +0 -0
  15. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/knowledge/__init__.py +0 -0
  16. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/knowledge/chunking.py +0 -0
  17. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/knowledge/knowledge.py +0 -0
  18. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/llm/__init__.py +0 -0
  19. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/main.py +0 -0
  20. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/mcp/__init__.py +0 -0
  21. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/mcp/mcp.py +0 -0
  22. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/mcp/mcp_sse.py +0 -0
  23. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/memory/memory.py +0 -0
  24. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/process/__init__.py +0 -0
  25. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/process/process.py +0 -0
  26. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/session.py +0 -0
  27. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/task/__init__.py +0 -0
  28. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/__init__.py +0 -0
  29. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/arxiv_tools.py +0 -0
  30. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/calculator_tools.py +0 -0
  31. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/csv_tools.py +0 -0
  32. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/duckdb_tools.py +0 -0
  33. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/duckduckgo_tools.py +0 -0
  34. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/excel_tools.py +0 -0
  35. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/file_tools.py +0 -0
  36. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/json_tools.py +0 -0
  37. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/newspaper_tools.py +0 -0
  38. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/pandas_tools.py +0 -0
  39. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/python_tools.py +0 -0
  40. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/shell_tools.py +0 -0
  41. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/spider_tools.py +0 -0
  42. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/test.py +0 -0
  43. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/tools.py +0 -0
  44. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/train/data/generatecot.py +0 -0
  45. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/wikipedia_tools.py +0 -0
  46. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/xml_tools.py +0 -0
  47. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/yaml_tools.py +0 -0
  48. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/tools/yfinance_tools.py +0 -0
  49. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents.egg-info/SOURCES.txt +0 -0
  50. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents.egg-info/dependency_links.txt +0 -0
  51. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents.egg-info/requires.txt +0 -0
  52. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents.egg-info/top_level.txt +0 -0
  53. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/setup.cfg +0 -0
  54. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/tests/test-graph-memory.py +0 -0
  55. {praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/tests/test.py +0 -0
{praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: praisonaiagents
-Version: 0.0.97
+Version: 0.0.99
 Summary: Praison AI agents for completing complex tasks with Self Reflection Agents
 Author: Mervin Praison
 Requires-Python: >=3.10
{praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/__init__.py
@@ -12,6 +12,7 @@ from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
 from .mcp.mcp import MCP
 from .session import Session
+from .guardrails import GuardrailResult, LLMGuardrail
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -55,5 +56,7 @@ __all__ = [
     'async_display_callbacks',
     'Knowledge',
     'Chunking',
-    'MCP'
+    'MCP',
+    'GuardrailResult',
+    'LLMGuardrail'
 ]
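With the change above, the guardrail primitives become importable from the package root. A minimal usage sketch, assuming only what this diff shows (the GuardrailResult fields match the constructor calls in agent.py further down):

from praisonaiagents import GuardrailResult, LLMGuardrail

# GuardrailResult carries success/result/error, the same shape that
# Agent._process_guardrail returns later in this diff.
passed = GuardrailResult(success=True, result="validated output")
failed = GuardrailResult(success=False, result=None, error="tone check failed")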
{praisonaiagents-0.0.97 → praisonaiagents-0.0.99}/praisonaiagents/agent/agent.py
@@ -3,7 +3,7 @@ import time
 import json
 import logging
 import asyncio
-from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING
+from typing import List, Optional, Any, Dict, Union, Literal, TYPE_CHECKING, Callable, Tuple
 from rich.console import Console
 from rich.live import Live
 from openai import AsyncOpenAI
@@ -32,6 +32,7 @@ _shared_apps = {}  # Dict of port -> FastAPI app

 if TYPE_CHECKING:
     from ..task.task import Task
+    from ..main import TaskOutput

 @dataclass
 class ChatCompletionMessage:
@@ -368,7 +369,9 @@ class Agent:
         min_reflect: int = 1,
         reflect_llm: Optional[str] = None,
         user_id: Optional[str] = None,
-        reasoning_steps: bool = False
+        reasoning_steps: bool = False,
+        guardrail: Optional[Union[Callable[['TaskOutput'], Tuple[bool, Any]], str]] = None,
+        max_guardrail_retries: int = 3
     ):
         # Add check at start if memory is requested
         if memory is not None:
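The two new keyword arguments attach a guardrail directly to the agent: either a callable that takes one TaskOutput and returns Tuple[bool, Any], or a plain-string description. A minimal sketch of the callable form; the name/instructions values and the validation logic are illustrative, while the import paths and parameter shapes follow this diff:

from typing import Any, Tuple

from praisonaiagents import Agent
from praisonaiagents.main import TaskOutput

def non_empty(output: TaskOutput) -> Tuple[bool, Any]:
    # Exactly one required parameter, returning (success, result): the
    # shape that _setup_guardrail validates in the hunk below.
    if not output.raw.strip():
        return False, "Response must not be empty"
    return True, output

agent = Agent(
    name="Writer",
    instructions="Summarise the input text.",
    guardrail=non_empty,
    max_guardrail_retries=3,  # default per the new signature
)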
@@ -483,6 +486,12 @@ Your Goal: {self.goal}
         # Store user_id
         self.user_id = user_id or "praison"
         self.reasoning_steps = reasoning_steps
+
+        # Initialize guardrail settings
+        self.guardrail = guardrail
+        self.max_guardrail_retries = max_guardrail_retries
+        self._guardrail_fn = None
+        self._setup_guardrail()

         # Check if knowledge parameter has any values
         if not knowledge:
@@ -512,6 +521,152 @@ Your Goal: {self.goal}
             except Exception as e:
                 logging.error(f"Error processing knowledge item: {knowledge_item}, error: {e}")

+    def _setup_guardrail(self):
+        """Setup the guardrail function based on the provided guardrail parameter."""
+        if self.guardrail is None:
+            self._guardrail_fn = None
+            return
+
+        if callable(self.guardrail):
+            # Validate function signature
+            sig = inspect.signature(self.guardrail)
+            positional_args = [
+                param for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
+                raise ValueError("Agent guardrail function must accept exactly one parameter (TaskOutput)")
+
+            # Check return annotation if present
+            from typing import get_args, get_origin
+            return_annotation = sig.return_annotation
+            if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
+                if not (
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or str(return_annotation_args[1]).endswith('TaskOutput')
+                        or str(return_annotation_args[1]).startswith('typing.Union')
+                    )
+                ):
+                    raise ValueError(
+                        "If return type is annotated, it must be Tuple[bool, Any] or Tuple[bool, Union[str, TaskOutput]]"
+                    )
+
+            self._guardrail_fn = self.guardrail
+        elif isinstance(self.guardrail, str):
+            # Create LLM-based guardrail
+            from ..guardrails import LLMGuardrail
+            llm = getattr(self, 'llm', None) or getattr(self, 'llm_instance', None)
+            self._guardrail_fn = LLMGuardrail(description=self.guardrail, llm=llm)
+        else:
+            raise ValueError("Agent guardrail must be either a callable or a string description")
+
+    def _process_guardrail(self, task_output):
+        """Process the guardrail validation for a task output.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            GuardrailResult: The result of the guardrail validation
+        """
+        from ..guardrails import GuardrailResult
+
+        if not self._guardrail_fn:
+            return GuardrailResult(success=True, result=task_output)
+
+        try:
+            # Call the guardrail function
+            result = self._guardrail_fn(task_output)
+
+            # Convert the result to a GuardrailResult
+            return GuardrailResult.from_tuple(result)
+
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Error in guardrail validation: {e}")
+            # On error, return failure
+            return GuardrailResult(
+                success=False,
+                result=None,
+                error=f"Agent guardrail validation error: {str(e)}"
+            )
+
+    def _apply_guardrail_with_retry(self, response_text, prompt, temperature=0.2, tools=None):
+        """Apply guardrail validation with retry logic.
+
+        Args:
+            response_text: The response to validate
+            prompt: Original prompt for regeneration if needed
+            temperature: Temperature for regeneration
+            tools: Tools for regeneration
+
+        Returns:
+            str: The validated response text or None if validation fails after retries
+        """
+        if not self._guardrail_fn:
+            return response_text
+
+        from ..main import TaskOutput
+
+        retry_count = 0
+        current_response = response_text
+
+        while retry_count <= self.max_guardrail_retries:
+            # Create TaskOutput object
+            task_output = TaskOutput(
+                raw=current_response,
+                output=current_response,
+                pydantic=None,
+                json_dict=None,
+                name=f"{self.name}_output",
+                description="Agent response output"
+            )
+
+            # Process guardrail
+            guardrail_result = self._process_guardrail(task_output)
+
+            if guardrail_result.success:
+                logging.info(f"Agent {self.name}: Guardrail validation passed")
+                # Return the potentially modified result
+                if guardrail_result.result and hasattr(guardrail_result.result, 'raw'):
+                    return guardrail_result.result.raw
+                elif guardrail_result.result:
+                    return str(guardrail_result.result)
+                else:
+                    return current_response
+
+            # Guardrail failed
+            if retry_count >= self.max_guardrail_retries:
+                raise Exception(
+                    f"Agent {self.name} response failed guardrail validation after {self.max_guardrail_retries} retries. "
+                    f"Last error: {guardrail_result.error}"
+                )
+
+            retry_count += 1
+            logging.warning(f"Agent {self.name}: Guardrail validation failed (retry {retry_count}/{self.max_guardrail_retries}): {guardrail_result.error}")
+
+            # Regenerate response for retry
+            try:
+                retry_prompt = f"{prompt}\n\nNote: Previous response failed validation due to: {guardrail_result.error}. Please provide an improved response."
+                response = self._chat_completion([{"role": "user", "content": retry_prompt}], temperature, tools)
+                if response and response.choices:
+                    current_response = response.choices[0].message.content.strip()
+                else:
+                    raise Exception("Failed to generate retry response")
+            except Exception as e:
+                logging.error(f"Agent {self.name}: Error during guardrail retry: {e}")
+                # If we can't regenerate, fail the guardrail
+                raise Exception(
+                    f"Agent {self.name} guardrail retry failed: {e}"
+                )
+
+        return current_response
+
     def generate_task(self) -> 'Task':
         """Generate a Task object from the agent's instructions"""
         from ..task.task import Task
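When guardrail is a string, _setup_guardrail wraps it in an LLMGuardrail driven by the agent's own model, and _apply_guardrail_with_retry re-prompts with the failure reason appended on each retry. A minimal sketch of the string form; the model name and text values are illustrative:

from praisonaiagents import Agent

agent = Agent(
    name="Support",
    instructions="Answer customer questions.",
    llm="gpt-4o-mini",  # illustrative; _setup_guardrail reuses self.llm for the check
    guardrail="The reply must be polite and must not reveal internal pricing",
)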
@@ -788,69 +943,92 @@ Your Goal: {self.goal}
                 )
             else:
                 # Use the standard OpenAI client approach
-                if stream:
-                    # Process as streaming response with formatted tools
-                    final_response = self._process_stream_response(
-                        messages,
-                        temperature,
-                        start_time,
-                        formatted_tools=formatted_tools if formatted_tools else None,
-                        reasoning_steps=reasoning_steps
-                    )
-                else:
-                    # Process as regular non-streaming response
-                    final_response = client.chat.completions.create(
-                        model=self.llm,
-                        messages=messages,
-                        temperature=temperature,
-                        tools=formatted_tools if formatted_tools else None,
-                        stream=False
-                    )
-
-                tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)
-
-                if tool_calls:
-                    messages.append({
-                        "role": "assistant",
-                        "content": final_response.choices[0].message.content,
-                        "tool_calls": tool_calls
-                    })
+                # Continue tool execution loop until no more tool calls are needed
+                max_iterations = 10  # Prevent infinite loops
+                iteration_count = 0
+
+                while iteration_count < max_iterations:
+                    if stream:
+                        # Process as streaming response with formatted tools
+                        final_response = self._process_stream_response(
+                            messages,
+                            temperature,
+                            start_time,
+                            formatted_tools=formatted_tools if formatted_tools else None,
+                            reasoning_steps=reasoning_steps
+                        )
+                    else:
+                        # Process as regular non-streaming response
+                        final_response = client.chat.completions.create(
+                            model=self.llm,
+                            messages=messages,
+                            temperature=temperature,
+                            tools=formatted_tools if formatted_tools else None,
+                            stream=False
+                        )

-                    for tool_call in tool_calls:
-                        function_name = tool_call.function.name
-                        arguments = json.loads(tool_call.function.arguments)
+                    tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)

-                        if self.verbose:
-                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")
+                    if tool_calls:
+                        messages.append({
+                            "role": "assistant",
+                            "content": final_response.choices[0].message.content,
+                            "tool_calls": tool_calls
+                        })

-                        tool_result = self.execute_tool(function_name, arguments)
-                        results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)

-                        if self.verbose:
-                            display_tool_call(f"Function '{function_name}' returned: {results_str}")
+                            if self.verbose:
+                                display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")

-                        messages.append({
-                            "role": "tool",
-                            "tool_call_id": tool_call.id,
-                            "content": results_str
-                        })
+                            tool_result = self.execute_tool(function_name, arguments)
+                            results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"

-                    # Get final response after tool calls
-                    if stream:
-                        final_response = self._process_stream_response(
-                            messages,
-                            temperature,
-                            start_time,
-                            formatted_tools=formatted_tools if formatted_tools else None,
-                            reasoning_steps=reasoning_steps
-                        )
-                    else:
-                        final_response = client.chat.completions.create(
-                            model=self.llm,
-                            messages=messages,
-                            temperature=temperature,
-                            stream=False
-                        )
+                            if self.verbose:
+                                display_tool_call(f"Function '{function_name}' returned: {results_str}")
+
+                            messages.append({
+                                "role": "tool",
+                                "tool_call_id": tool_call.id,
+                                "content": results_str
+                            })
+
+                        # Check if we should continue (for tools like sequential thinking)
+                        should_continue = False
+                        for tool_call in tool_calls:
+                            function_name = tool_call.function.name
+                            arguments = json.loads(tool_call.function.arguments)
+
+                            # For sequential thinking tool, check if nextThoughtNeeded is True
+                            if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                                should_continue = True
+                                break
+
+                        if not should_continue:
+                            # Get final response after tool calls
+                            if stream:
+                                final_response = self._process_stream_response(
+                                    messages,
+                                    temperature,
+                                    start_time,
+                                    formatted_tools=formatted_tools if formatted_tools else None,
+                                    reasoning_steps=reasoning_steps
+                                )
+                            else:
+                                final_response = client.chat.completions.create(
+                                    model=self.llm,
+                                    messages=messages,
+                                    temperature=temperature,
+                                    stream=False
+                                )
+                            break
+
+                        iteration_count += 1
+                    else:
+                        # No tool calls, we're done
+                        break

             return final_response
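The rewrite above turns a single round of tool execution into a bounded loop (max_iterations = 10). The one continuation trigger in this version is a tool named "sequentialthinking" whose JSON arguments carry nextThoughtNeeded; a minimal sketch of that test with an illustrative payload:

import json

# Illustrative tool-call arguments as they would arrive from the model.
arguments = json.loads('{"thought": "Check the premise first", "nextThoughtNeeded": true}')

# Mirrors the loop's check: keep iterating while the sequential-thinking
# tool asks for another thought; otherwise fetch the final response and break.
should_continue = arguments.get("nextThoughtNeeded", False)
print(should_continue)  # True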
@@ -944,7 +1122,13 @@ Your Goal: {self.goal}
                     total_time = time.time() - start_time
                     logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")

-                return response_text
+                # Apply guardrail validation for custom LLM response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed for custom LLM: {e}")
+                    return None
             except Exception as e:
                 display_error(f"Error in LLM chat: {e}")
                 return None
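This hunk, and the similar ones below, change the failure contract: when a guardrail is configured and validation still fails after max_guardrail_retries, Agent.chat now returns None instead of the unvalidated text. A minimal sketch of a defensive caller; the prompt text is illustrative:

reply = agent.chat("Summarise the quarterly report")
if reply is None:
    # Guardrail validation exhausted its retries, or the underlying LLM call failed.
    raise RuntimeError("Agent response rejected by guardrail")
print(reply)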
@@ -1032,8 +1216,20 @@ Your Goal: {self.goal}
                 display_interaction(original_prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
                 # Return only reasoning content if reasoning_steps is True
                 if reasoning_steps and hasattr(response.choices[0].message, 'reasoning_content'):
-                    return response.choices[0].message.reasoning_content
-                return response_text
+                    # Apply guardrail to reasoning content
+                    try:
+                        validated_reasoning = self._apply_guardrail_with_retry(response.choices[0].message.reasoning_content, original_prompt, temperature, tools)
+                        return validated_reasoning
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed for reasoning content: {e}")
+                        return None
+                # Apply guardrail to regular response
+                try:
+                    validated_response = self._apply_guardrail_with_retry(response_text, original_prompt, temperature, tools)
+                    return validated_response
+                except Exception as e:
+                    logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+                    return None

             reflection_prompt = f"""
 Reflect on your previous response: '{response_text}'.
@@ -1066,7 +1262,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "user", "content": prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                    return response_text
+                    # Apply guardrail validation after satisfactory reflection
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed after reflection: {e}")
+                        return None

                 # Check if we've hit max reflections
                 if reflection_count >= self.max_reflect - 1:
@@ -1075,7 +1277,13 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
                     self.chat_history.append({"role": "user", "content": prompt})
                     self.chat_history.append({"role": "assistant", "content": response_text})
                     display_interaction(prompt, response_text, markdown=self.markdown, generation_time=time.time() - start_time, console=self.console)
-                    return response_text
+                    # Apply guardrail validation after max reflections
+                    try:
+                        validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+                        return validated_response
+                    except Exception as e:
+                        logging.error(f"Agent {self.name}: Guardrail validation failed after max reflections: {e}")
+                        return None

                 logging.debug(f"{self.name} reflection count {reflection_count + 1}, continuing reflection process")
                 messages.append({"role": "user", "content": "Now regenerate your response using the reflection you made"})
@@ -1099,8 +1307,16 @@ Output MUST be JSON with 'reflection' and 'satisfactory'.
         if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
             total_time = time.time() - start_time
             logging.debug(f"Agent.chat completed in {total_time:.2f} seconds")
-
-        return response_text
+
+        # Apply guardrail validation before returning
+        try:
+            validated_response = self._apply_guardrail_with_retry(response_text, prompt, temperature, tools)
+            return validated_response
+        except Exception as e:
+            logging.error(f"Agent {self.name}: Guardrail validation failed: {e}")
+            if self.verbose:
+                display_error(f"Guardrail validation failed: {e}", console=self.console)
+            return None

     def clean_json_output(self, output: str) -> str:
         """Clean and extract JSON from response text."""
@@ -1670,6 +1886,7 @@ Your Goal: {self.goal}
             import threading
             import time
             import inspect
+            import asyncio  # Import asyncio in the MCP scope
             # logging is already imported at the module level

         except ImportError as e: