praisonaiagents 0.0.97__py3-none-any.whl → 0.0.98__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- praisonaiagents/__init__.py +4 -1
- praisonaiagents/agent/agent.py +81 -57
- praisonaiagents/task/task.py +121 -3
- {praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/METADATA +1 -1
- {praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/RECORD +7 -7
- {praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/WHEEL +0 -0
- {praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/top_level.txt +0 -0
praisonaiagents/__init__.py
CHANGED
@@ -12,6 +12,7 @@ from .knowledge.knowledge import Knowledge
 from .knowledge.chunking import Chunking
 from .mcp.mcp import MCP
 from .session import Session
+from .guardrails import GuardrailResult, LLMGuardrail
 from .main import (
     TaskOutput,
     ReflectionOutput,
@@ -55,5 +56,7 @@ __all__ = [
     'async_display_callbacks',
     'Knowledge',
     'Chunking',
-    'MCP'
+    'MCP',
+    'GuardrailResult',
+    'LLMGuardrail'
 ]
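With this change the guardrail primitives become importable from the package root. A minimal sketch of the new import path; the GuardrailResult construction is illustrative and uses only the fields exercised in task.py later in this diff:

# New in 0.0.98: re-exported at the package root rather than only under
# praisonaiagents.guardrails.
from praisonaiagents import GuardrailResult, LLMGuardrail

ok = GuardrailResult(success=True, result=None, error="")  # fields as used in task.py
print(ok.success)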
praisonaiagents/agent/agent.py
CHANGED
@@ -788,69 +788,92 @@ Your Goal: {self.goal}
             )
         else:
             # Use the standard OpenAI client approach
+            # Continue tool execution loop until no more tool calls are needed
+            max_iterations = 10  # Prevent infinite loops
+            iteration_count = 0
+
+            while iteration_count < max_iterations:
+                if stream:
+                    # Process as streaming response with formatted tools
+                    final_response = self._process_stream_response(
+                        messages,
+                        temperature,
+                        start_time,
+                        formatted_tools=formatted_tools if formatted_tools else None,
+                        reasoning_steps=reasoning_steps
+                    )
+                else:
+                    # Process as regular non-streaming response
+                    final_response = client.chat.completions.create(
+                        model=self.llm,
+                        messages=messages,
+                        temperature=temperature,
+                        tools=formatted_tools if formatted_tools else None,
+                        stream=False
+                    )

+                tool_calls = getattr(final_response.choices[0].message, 'tool_calls', None)

+                if tool_calls:
+                    messages.append({
+                        "role": "assistant",
+                        "content": final_response.choices[0].message.content,
+                        "tool_calls": tool_calls
+                    })

+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)

+                        if self.verbose:
+                            display_tool_call(f"Agent {self.name} is calling function '{function_name}' with arguments: {arguments}")

+                        tool_result = self.execute_tool(function_name, arguments)
+                        results_str = json.dumps(tool_result) if tool_result else "Function returned an empty output"

+                        if self.verbose:
+                            display_tool_call(f"Function '{function_name}' returned: {results_str}")
+
+                        messages.append({
+                            "role": "tool",
+                            "tool_call_id": tool_call.id,
+                            "content": results_str
+                        })
+
+                    # Check if we should continue (for tools like sequential thinking)
+                    should_continue = False
+                    for tool_call in tool_calls:
+                        function_name = tool_call.function.name
+                        arguments = json.loads(tool_call.function.arguments)
+
+                        # For sequential thinking tool, check if nextThoughtNeeded is True
+                        if function_name == "sequentialthinking" and arguments.get("nextThoughtNeeded", False):
+                            should_continue = True
+                            break
+
+                    if not should_continue:
+                        # Get final response after tool calls
+                        if stream:
+                            final_response = self._process_stream_response(
+                                messages,
+                                temperature,
+                                start_time,
+                                formatted_tools=formatted_tools if formatted_tools else None,
+                                reasoning_steps=reasoning_steps
+                            )
+                        else:
+                            final_response = client.chat.completions.create(
+                                model=self.llm,
+                                messages=messages,
+                                temperature=temperature,
+                                stream=False
+                            )
+                        break
+
+                    iteration_count += 1
+                else:
+                    # No tool calls, we're done
+                    break

         return final_response
@@ -1670,6 +1693,7 @@ Your Goal: {self.goal}
             import threading
             import time
             import inspect
+            import asyncio  # Import asyncio in the MCP scope
             # logging is already imported at the module level

         except ImportError as e:
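The loop above re-invokes the model after each batch of tool results, up to 10 iterations, and keeps iterating only while a "sequentialthinking" call reports nextThoughtNeeded=True. A hypothetical usage sketch; the MCP server command, model name, and prompt are assumptions, not taken from this diff:

from praisonaiagents import Agent, MCP

# Hypothetical wiring: an agent whose tools come from a sequential-thinking MCP server.
# Each tool round that returns nextThoughtNeeded=True triggers another pass of the
# new while loop; max_iterations=10 caps the number of passes.
agent = Agent(
    instructions="Think through the problem step by step before answering.",
    llm="gpt-4o-mini",
    tools=MCP("npx -y @modelcontextprotocol/server-sequential-thinking"),
)
agent.start("Outline a rollout plan in three thought steps.")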
praisonaiagents/task/task.py
CHANGED
@@ -1,6 +1,7 @@
 import logging
 import asyncio
+import inspect
+from typing import List, Optional, Dict, Any, Type, Callable, Union, Coroutine, Literal, Tuple, get_args, get_origin
 from pydantic import BaseModel
 from ..main import TaskOutput
 from ..agent.agent import Agent
@@ -40,7 +41,10 @@ class Task:
         quality_check=True,
         input_file: Optional[str] = None,
         rerun: bool = False, # Renamed from can_rerun and logic inverted, default True for backward compatibility
-        retain_full_context: bool = False # By default, only use previous task output, not all previous tasks
+        retain_full_context: bool = False, # By default, only use previous task output, not all previous tasks
+        guardrail: Optional[Union[Callable[[TaskOutput], Tuple[bool, Any]], str]] = None,
+        max_retries: int = 3,
+        retry_count: int = 0
     ):
         # Add check if memory config is provided
         if memory is not None or (config and config.get('memory_config')):
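The three new constructor parameters are guardrail, max_retries, and retry_count. A hedged sketch of a string guardrail, which _setup_guardrail below converts into an LLMGuardrail using the task's agent; the agent and task description are illustrative:

from praisonaiagents import Agent, Task

reviewer = Agent(instructions="Write a short release note.")  # illustrative agent

task = Task(
    description="Summarise what changed between 0.0.97 and 0.0.98.",
    expected_output="One short paragraph.",
    agent=reviewer,
    guardrail="The summary must mention the new guardrail support.",  # string guardrail
    max_retries=2,  # bound on guardrail-driven retries
)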
@@ -80,6 +84,10 @@ class Task:
         self.quality_check = quality_check
         self.rerun = rerun # Assigning the rerun parameter
         self.retain_full_context = retain_full_context
+        self.guardrail = guardrail
+        self.max_retries = max_retries
+        self.retry_count = retry_count
+        self._guardrail_fn = None

         # Set logger level based on config verbose level
         verbose = self.config.get("verbose", 0)
@@ -141,6 +149,55 @@ class Task:

             self.output_pydantic = LoopModel

+        # Initialize guardrail
+        self._setup_guardrail()
+
+    def _setup_guardrail(self):
+        """Setup the guardrail function based on the provided guardrail parameter."""
+        if self.guardrail is None:
+            self._guardrail_fn = None
+            return
+
+        if callable(self.guardrail):
+            # Validate function signature
+            sig = inspect.signature(self.guardrail)
+            positional_args = [
+                param for param in sig.parameters.values()
+                if param.default is inspect.Parameter.empty
+            ]
+            if len(positional_args) != 1:
+                raise ValueError("Guardrail function must accept exactly one parameter (TaskOutput)")
+
+            # Check return annotation if present
+            return_annotation = sig.return_annotation
+            if return_annotation != inspect.Signature.empty:
+                return_annotation_args = get_args(return_annotation)
+                if not (
+                    get_origin(return_annotation) is tuple
+                    and len(return_annotation_args) == 2
+                    and return_annotation_args[0] is bool
+                    and (
+                        return_annotation_args[1] is Any
+                        or return_annotation_args[1] is str
+                        or return_annotation_args[1] is TaskOutput
+                        or return_annotation_args[1] == Union[str, TaskOutput]
+                    )
+                ):
+                    raise ValueError(
+                        "If return type is annotated, it must be Tuple[bool, Any]"
+                    )
+
+            self._guardrail_fn = self.guardrail
+        elif isinstance(self.guardrail, str):
+            # Create LLM-based guardrail
+            from ..guardrails import LLMGuardrail
+            if not self.agent:
+                raise ValueError("Agent is required for string-based guardrails")
+            llm = getattr(self.agent, 'llm', None) or getattr(self.agent, 'llm_instance', None)
+            self._guardrail_fn = LLMGuardrail(description=self.guardrail, llm=llm)
+        else:
+            raise ValueError("Guardrail must be either a callable or a string description")
+
     def __str__(self):
         return f"Task(name='{self.name if self.name else 'None'}', description='{self.description}', agent='{self.agent.name if self.agent else 'None'}', status='{self.status}')"

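_setup_guardrail accepts a callable only when it takes exactly one required parameter and, if a return annotation is present, returns Tuple[bool, Any]. A minimal conforming function; the emptiness check itself is illustrative:

from typing import Any, Tuple
from praisonaiagents import TaskOutput

def non_empty_output(output: TaskOutput) -> Tuple[bool, Any]:
    # One required TaskOutput parameter and a (bool, Any) return, as validated above.
    if output.raw and output.raw.strip():
        return True, output  # pass: hand the output through unchanged
    return False, "Output was empty; please regenerate."  # fail: reason for the retry

Passing guardrail=non_empty_output when constructing the Task routes each task output through _process_guardrail at the start of execute_callback, shown next.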
@@ -187,6 +244,37 @@ class Task:
        logger.info(f"Task {self.id}: execute_callback called")
        logger.info(f"Quality check enabled: {self.quality_check}")

+        # Process guardrail if configured
+        if self._guardrail_fn:
+            try:
+                guardrail_result = self._process_guardrail(task_output)
+                if not guardrail_result.success:
+                    if self.retry_count >= self.max_retries:
+                        raise Exception(
+                            f"Task failed guardrail validation after {self.max_retries} retries. "
+                            f"Last error: {guardrail_result.error}"
+                        )
+
+                    self.retry_count += 1
+                    logger.warning(f"Task {self.id}: Guardrail validation failed (retry {self.retry_count}/{self.max_retries}): {guardrail_result.error}")
+                    # Note: In a real execution, this would trigger a retry, but since this is a callback
+                    # the retry logic would need to be handled at the agent/execution level
+                    return
+
+                # If guardrail passed and returned a modified result
+                if guardrail_result.result is not None:
+                    if isinstance(guardrail_result.result, str):
+                        # Update the task output with the modified result
+                        task_output.raw = guardrail_result.result
+                    elif isinstance(guardrail_result.result, TaskOutput):
+                        # Replace with the new task output
+                        task_output = guardrail_result.result
+
+                logger.info(f"Task {self.id}: Guardrail validation passed")
+            except Exception as e:
+                logger.error(f"Task {self.id}: Error in guardrail processing: {e}")
+                # Continue execution even if guardrail fails to avoid breaking the task
+
        # Initialize memory if not already initialized
        if not self.memory:
            self.memory = self.initialize_memory()
@@ -334,4 +422,34 @@ Context:
                loop.run_until_complete(self.execute_callback(task_output))
            except RuntimeError:
                # If no loop is running in this context
-               asyncio.run(self.execute_callback(task_output))
+               asyncio.run(self.execute_callback(task_output))
+
+    def _process_guardrail(self, task_output: TaskOutput):
+        """Process the guardrail validation for a task output.
+
+        Args:
+            task_output: The task output to validate
+
+        Returns:
+            GuardrailResult: The result of the guardrail validation
+        """
+        from ..guardrails import GuardrailResult
+
+        if not self._guardrail_fn:
+            return GuardrailResult(success=True, result=task_output)
+
+        try:
+            # Call the guardrail function
+            result = self._guardrail_fn(task_output)
+
+            # Convert the result to a GuardrailResult
+            return GuardrailResult.from_tuple(result)
+
+        except Exception as e:
+            logger.error(f"Task {self.id}: Error in guardrail validation: {e}")
+            # On error, return failure
+            return GuardrailResult(
+                success=False,
+                result=None,
+                error=f"Guardrail validation error: {str(e)}"
+            )
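Whatever the guardrail callable returns is normalised through GuardrailResult.from_tuple. The mapping below is inferred from how the tuple is produced and consumed in this diff, not from documented behaviour:

from praisonaiagents import GuardrailResult

# Inferred mapping (assumption): (True, value) -> success=True with the possibly
# modified output carried in result; (False, "reason") -> success=False with the
# reason surfaced via error and counted against max_retries in execute_callback.
passed = GuardrailResult.from_tuple((True, "tightened-up answer"))
failed = GuardrailResult.from_tuple((False, "the answer never mentions the price"))
print(passed.success, failed.success)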
{praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/RECORD
CHANGED
@@ -1,9 +1,9 @@
-praisonaiagents/__init__.py,sha256=
+praisonaiagents/__init__.py,sha256=GmTiMNta4iwmfarh_6cTUpry50hpqFE8YqolrYfZ_7U,1465
 praisonaiagents/approval.py,sha256=UJ4OhfihpFGR5CAaMphqpSvqdZCHi5w2MGw1MByZ1FQ,9813
 praisonaiagents/main.py,sha256=_-XE7_Y7ChvtLQMivfNFrrnAhv4wSSDhH9WJMWlkS0w,16315
 praisonaiagents/session.py,sha256=CI-ffCiOfmgB-1zFFik9daKCB5Sm41Q9ZOaq1-oSLW8,9250
 praisonaiagents/agent/__init__.py,sha256=j0T19TVNbfZcClvpbZDDinQxZ0oORgsMrMqx16jZ-bA,128
-praisonaiagents/agent/agent.py,sha256=
+praisonaiagents/agent/agent.py,sha256=be5E_uEXxZo4xmRRoRQoCpZ964pzlakC_QCOOsG4rCA,88440
 praisonaiagents/agent/image_agent.py,sha256=-5MXG594HVwSpFMcidt16YBp7udtik-Cp7eXlzLE1fY,8696
 praisonaiagents/agents/__init__.py,sha256=_1d6Pqyk9EoBSo7E68sKyd1jDRlN1vxvVIRpoMc0Jcw,168
 praisonaiagents/agents/agents.py,sha256=C_yDdJB4XUuwKA9DrysAtAj3zSYT0IKtfCT4Pxo0oyI,63309
@@ -20,7 +20,7 @@ praisonaiagents/memory/memory.py,sha256=6tvsLWkpvyNU-t-8d6XpW7vOFUm1pNReW5B7rA8c
 praisonaiagents/process/__init__.py,sha256=lkYbL7Hn5a0ldvJtkdH23vfIIZLIcanK-65C0MwaorY,52
 praisonaiagents/process/process.py,sha256=gxhMXG3s4CzaREyuwE5zxCMx2Wp_b_Wd53tDfkj8Qk8,66567
 praisonaiagents/task/__init__.py,sha256=VL5hXVmyGjINb34AalxpBMl-YW9m5EDcRkMTKkSSl7c,80
-praisonaiagents/task/task.py,sha256=
+praisonaiagents/task/task.py,sha256=60JE07JPpRowbEO420f4Ol49e_1AK856wSJDi_ricbg,20531
 praisonaiagents/tools/__init__.py,sha256=Rrgi7_3-yLHpfBB81WUi0-wD_wb_BsukwHVdjDYAF-0,9316
 praisonaiagents/tools/arxiv_tools.py,sha256=1stb31zTjLTon4jCnpZG5de9rKc9QWgC0leLegvPXWo,10528
 praisonaiagents/tools/calculator_tools.py,sha256=S1xPT74Geurvjm52QMMIG29zDXVEWJmM6nmyY7yF298,9571
@@ -42,7 +42,7 @@ praisonaiagents/tools/xml_tools.py,sha256=iYTMBEk5l3L3ryQ1fkUnNVYK-Nnua2Kx2S0dxN
 praisonaiagents/tools/yaml_tools.py,sha256=uogAZrhXV9O7xvspAtcTfpKSQYL2nlOTvCQXN94-G9A,14215
 praisonaiagents/tools/yfinance_tools.py,sha256=s2PBj_1v7oQnOobo2fDbQBACEHl61ftG4beG6Z979ZE,8529
 praisonaiagents/tools/train/data/generatecot.py,sha256=H6bNh-E2hqL5MW6kX3hqZ05g9ETKN2-kudSjiuU_SD8,19403
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
-praisonaiagents-0.0.
+praisonaiagents-0.0.98.dist-info/METADATA,sha256=CGUF2azfrJRTc_js-tPZRX4-U2MIGe8v9wcCWa0pL6w,1452
+praisonaiagents-0.0.98.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+praisonaiagents-0.0.98.dist-info/top_level.txt,sha256=_HsRddrJ23iDx5TTqVUVvXG2HeHBL5voshncAMDGjtA,16
+praisonaiagents-0.0.98.dist-info/RECORD,,

{praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/WHEEL
File without changes

{praisonaiagents-0.0.97.dist-info → praisonaiagents-0.0.98.dist-info}/top_level.txt
File without changes